[llvm-branch-commits] [llvm] [RISCV] Add initial support of memcmp expansion (PR #107548)
Pengcheng Wang via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Nov 5 23:41:33 PST 2024
https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/107548
>From f21cfcfc90330ee3856746b6315a81a00313b0e0 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Fri, 6 Sep 2024 17:20:51 +0800
Subject: [PATCH 1/7] [𝘀𝗽𝗿] initial version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Created using spr 1.3.6-beta.1
---
.../Target/RISCV/RISCVTargetTransformInfo.cpp | 15 +
.../Target/RISCV/RISCVTargetTransformInfo.h | 3 +
llvm/test/CodeGen/RISCV/memcmp.ll | 932 ++++++++++++++++++
3 files changed, 950 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/memcmp.ll
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index e809e15eacf696..ad532aadc83266 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -2113,3 +2113,18 @@ bool RISCVTTIImpl::shouldConsiderAddressTypePromotion(
}
return Considerable;
}
+
+RISCVTTIImpl::TTI::MemCmpExpansionOptions
+RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
+ TTI::MemCmpExpansionOptions Options;
+  // FIXME: Vectors haven't been tested.
+ Options.AllowOverlappingLoads =
+ (ST->enableUnalignedScalarMem() || ST->enableUnalignedScalarMem());
+ Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
+ Options.NumLoadsPerBlock = Options.MaxNumLoads;
+ if (ST->is64Bit())
+ Options.LoadSizes.push_back(8);
+ llvm::append_range(Options.LoadSizes, ArrayRef({4, 2, 1}));
+ Options.AllowedTailExpansions = {3, 5, 6};
+ return Options;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 763b89bfec0a66..ee9bed09df97f3 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -404,6 +404,9 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
shouldConsiderAddressTypePromotion(const Instruction &I,
bool &AllowPromotionWithoutCommonHeader);
std::optional<unsigned> getMinPageSize() const { return 4096; }
+
+ TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
+ bool IsZeroCmp) const;
};
} // end namespace llvm
diff --git a/llvm/test/CodeGen/RISCV/memcmp.ll b/llvm/test/CodeGen/RISCV/memcmp.ll
new file mode 100644
index 00000000000000..652cd02e2c750a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/memcmp.ll
@@ -0,0 +1,932 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -O2 | FileCheck %s --check-prefix=CHECK-ALIGNED-RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -O2 | FileCheck %s --check-prefix=CHECK-ALIGNED-RV64
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -O2 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNALIGNED-RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -O2 \
+; RUN: | FileCheck %s --check-prefix=CHECK-UNALIGNED-RV64
+
+declare i32 @bcmp(i8*, i8*, iXLen) nounwind readonly
+declare i32 @memcmp(i8*, i8*, iXLen) nounwind readonly
+
+define i1 @bcmp_size_15(i8* %s1, i8* %s2) {
+; CHECK-ALIGNED-RV32-LABEL: bcmp_size_15:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
+; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
+; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
+; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 0(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 2(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 6(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 7(a0)
+; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 5(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 6(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 7(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV32-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV32-NEXT: xor a3, a3, a4
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 9(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 8(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 10(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 11(a0)
+; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV32-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 9(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 10(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu t0, 11(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV32-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV32-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 13(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu t0, 12(a0)
+; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV32-NEXT: xor a4, a4, a5
+; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
+; CHECK-ALIGNED-RV32-NEXT: or a5, a7, t0
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 13(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 12(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a0, 14(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a1, 14(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
+; CHECK-ALIGNED-RV32-NEXT: or a6, a6, a7
+; CHECK-ALIGNED-RV32-NEXT: xor a5, a5, a6
+; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
+; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
+; CHECK-ALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_size_15:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
+; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 5(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 6(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
+; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 2(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 5(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 6(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 9(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 10(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 9(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 10(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 12(a0)
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a7
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 13(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a0, 14(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a1, 14(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
+; CHECK-ALIGNED-RV64-NEXT: or a0, a3, a0
+; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
+; CHECK-ALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_15:
+; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a4, 4(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a5, 4(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a6, 8(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a7, 8(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a0, 11(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a1, 11(a1)
+; CHECK-UNALIGNED-RV32-NEXT: xor a2, a2, a3
+; CHECK-UNALIGNED-RV32-NEXT: xor a4, a4, a5
+; CHECK-UNALIGNED-RV32-NEXT: xor a3, a6, a7
+; CHECK-UNALIGNED-RV32-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
+; CHECK-UNALIGNED-RV32-NEXT: or a0, a3, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a0, a2, a0
+; CHECK-UNALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-UNALIGNED-RV32-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: bcmp_size_15:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a3, 0(a1)
+; CHECK-UNALIGNED-RV64-NEXT: ld a0, 7(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a1, 7(a1)
+; CHECK-UNALIGNED-RV64-NEXT: xor a2, a2, a3
+; CHECK-UNALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-RV64-NEXT: or a0, a2, a0
+; CHECK-UNALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-UNALIGNED-RV64-NEXT: ret
+entry:
+ %bcmp = call i32 @bcmp(i8* %s1, i8* %s2, iXLen 15)
+ %ret = icmp eq i32 %bcmp, 0
+ ret i1 %ret
+}
+
+define i1 @bcmp_size_31(i8* %s1, i8* %s2) {
+; CHECK-ALIGNED-RV32-LABEL: bcmp_size_31:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: .cfi_offset ra, -4
+; CHECK-ALIGNED-RV32-NEXT: li a2, 31
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_size_31:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
+; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 5(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 6(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
+; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 2(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 5(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 6(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 9(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 10(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 13(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 14(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 15(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 9(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 10(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 13(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 14(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 15(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 17(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 16(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 18(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 19(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 21(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 20(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 22(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 23(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 17(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 18(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 19(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 21(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 20(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 22(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t1, 23(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
+; CHECK-ALIGNED-RV64-NEXT: or a7, t1, t0
+; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 25(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 26(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 25(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 24(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 26(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t1, 27(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
+; CHECK-ALIGNED-RV64-NEXT: or a7, t1, t0
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 29(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu t1, 28(a0)
+; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: xor a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, t1
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 29(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 28(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a0, 30(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a1, 30(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
+; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
+; CHECK-ALIGNED-RV64-NEXT: xor a6, a6, a7
+; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: or a0, a6, a0
+; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
+; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
+; CHECK-ALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_31:
+; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a4, 4(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a5, 4(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a6, 8(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a7, 8(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw t0, 12(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw t1, 12(a1)
+; CHECK-UNALIGNED-RV32-NEXT: xor a2, a2, a3
+; CHECK-UNALIGNED-RV32-NEXT: xor a4, a4, a5
+; CHECK-UNALIGNED-RV32-NEXT: xor a3, a6, a7
+; CHECK-UNALIGNED-RV32-NEXT: xor a5, t0, t1
+; CHECK-UNALIGNED-RV32-NEXT: lw a6, 16(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a7, 16(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw t0, 20(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw t1, 20(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw t2, 24(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw t3, 24(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a0, 27(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a1, 27(a1)
+; CHECK-UNALIGNED-RV32-NEXT: xor a6, a6, a7
+; CHECK-UNALIGNED-RV32-NEXT: xor a7, t0, t1
+; CHECK-UNALIGNED-RV32-NEXT: xor t0, t2, t3
+; CHECK-UNALIGNED-RV32-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
+; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV32-NEXT: or a1, a6, a7
+; CHECK-UNALIGNED-RV32-NEXT: or a0, t0, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a3
+; CHECK-UNALIGNED-RV32-NEXT: or a0, a1, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a0, a2, a0
+; CHECK-UNALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-UNALIGNED-RV32-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: bcmp_size_31:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a3, 0(a1)
+; CHECK-UNALIGNED-RV64-NEXT: ld a4, 8(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a5, 8(a1)
+; CHECK-UNALIGNED-RV64-NEXT: ld a6, 16(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a7, 16(a1)
+; CHECK-UNALIGNED-RV64-NEXT: ld a0, 23(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a1, 23(a1)
+; CHECK-UNALIGNED-RV64-NEXT: xor a2, a2, a3
+; CHECK-UNALIGNED-RV64-NEXT: xor a4, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: xor a3, a6, a7
+; CHECK-UNALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
+; CHECK-UNALIGNED-RV64-NEXT: or a0, a3, a0
+; CHECK-UNALIGNED-RV64-NEXT: or a0, a2, a0
+; CHECK-UNALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-UNALIGNED-RV64-NEXT: ret
+entry:
+ %bcmp = call i32 @bcmp(i8* %s1, i8* %s2, iXLen 31)
+ %ret = icmp eq i32 %bcmp, 0
+ ret i1 %ret
+}
+
+define i1 @memcmp_size_15(i8* %s1, i8* %s2) {
+; CHECK-ALIGNED-RV32-LABEL: memcmp_size_15:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
+; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
+; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
+; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 0(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 2(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 6(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 7(a0)
+; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 5(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 6(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 7(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV32-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV32-NEXT: xor a3, a3, a4
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 9(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 8(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 10(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 11(a0)
+; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV32-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV32-NEXT: lbu a5, 9(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 10(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu t0, 11(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV32-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV32-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 13(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu t0, 12(a0)
+; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV32-NEXT: xor a4, a4, a5
+; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
+; CHECK-ALIGNED-RV32-NEXT: or a5, a7, t0
+; CHECK-ALIGNED-RV32-NEXT: lbu a6, 13(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a7, 12(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a0, 14(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a1, 14(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
+; CHECK-ALIGNED-RV32-NEXT: or a6, a6, a7
+; CHECK-ALIGNED-RV32-NEXT: xor a5, a5, a6
+; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
+; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
+; CHECK-ALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: memcmp_size_15:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
+; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 5(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 6(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
+; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 2(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 5(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 6(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 9(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 10(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 9(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 10(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 12(a0)
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a7
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 13(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a0, 14(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a1, 14(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
+; CHECK-ALIGNED-RV64-NEXT: or a0, a3, a0
+; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
+; CHECK-ALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_15:
+; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a4, 4(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a5, 4(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a6, 8(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a7, 8(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a0, 11(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a1, 11(a1)
+; CHECK-UNALIGNED-RV32-NEXT: xor a2, a2, a3
+; CHECK-UNALIGNED-RV32-NEXT: xor a4, a4, a5
+; CHECK-UNALIGNED-RV32-NEXT: xor a3, a6, a7
+; CHECK-UNALIGNED-RV32-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
+; CHECK-UNALIGNED-RV32-NEXT: or a0, a3, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a0, a2, a0
+; CHECK-UNALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-UNALIGNED-RV32-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_15:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a3, 0(a1)
+; CHECK-UNALIGNED-RV64-NEXT: ld a0, 7(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a1, 7(a1)
+; CHECK-UNALIGNED-RV64-NEXT: xor a2, a2, a3
+; CHECK-UNALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-RV64-NEXT: or a0, a2, a0
+; CHECK-UNALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-UNALIGNED-RV64-NEXT: ret
+entry:
+ %memcmp = call i32 @memcmp(i8* %s1, i8* %s2, iXLen 15)
+ %ret = icmp eq i32 %memcmp, 0
+ ret i1 %ret
+}
+
+define i1 @memcmp_size_31(i8* %s1, i8* %s2) {
+; CHECK-ALIGNED-RV32-LABEL: memcmp_size_31:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: .cfi_offset ra, -4
+; CHECK-ALIGNED-RV32-NEXT: li a2, 31
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: memcmp_size_31:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
+; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 5(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 6(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
+; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 2(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 5(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 6(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 9(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 10(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
+; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 13(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 14(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 15(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
+; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 9(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 10(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 13(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 14(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 15(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 17(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 16(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 18(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 19(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 21(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 20(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 22(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 23(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
+; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 17(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 18(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 19(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 21(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 20(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 22(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t1, 23(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
+; CHECK-ALIGNED-RV64-NEXT: or a7, t1, t0
+; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: lbu a5, 25(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 26(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
+; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
+; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, a7
+; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
+; CHECK-ALIGNED-RV64-NEXT: lbu a6, 25(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 24(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 26(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t1, 27(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
+; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
+; CHECK-ALIGNED-RV64-NEXT: or a7, t1, t0
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 29(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu t1, 28(a0)
+; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
+; CHECK-ALIGNED-RV64-NEXT: xor a5, a5, a6
+; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
+; CHECK-ALIGNED-RV64-NEXT: or a6, t0, t1
+; CHECK-ALIGNED-RV64-NEXT: lbu a7, 29(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu t0, 28(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a0, 30(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a1, 30(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
+; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
+; CHECK-ALIGNED-RV64-NEXT: xor a6, a6, a7
+; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
+; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-ALIGNED-RV64-NEXT: or a0, a6, a0
+; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
+; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
+; CHECK-ALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_31:
+; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a4, 4(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a5, 4(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a6, 8(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a7, 8(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw t0, 12(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw t1, 12(a1)
+; CHECK-UNALIGNED-RV32-NEXT: xor a2, a2, a3
+; CHECK-UNALIGNED-RV32-NEXT: xor a4, a4, a5
+; CHECK-UNALIGNED-RV32-NEXT: xor a3, a6, a7
+; CHECK-UNALIGNED-RV32-NEXT: xor a5, t0, t1
+; CHECK-UNALIGNED-RV32-NEXT: lw a6, 16(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a7, 16(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw t0, 20(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw t1, 20(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw t2, 24(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw t3, 24(a1)
+; CHECK-UNALIGNED-RV32-NEXT: lw a0, 27(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lw a1, 27(a1)
+; CHECK-UNALIGNED-RV32-NEXT: xor a6, a6, a7
+; CHECK-UNALIGNED-RV32-NEXT: xor a7, t0, t1
+; CHECK-UNALIGNED-RV32-NEXT: xor t0, t2, t3
+; CHECK-UNALIGNED-RV32-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
+; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV32-NEXT: or a1, a6, a7
+; CHECK-UNALIGNED-RV32-NEXT: or a0, t0, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a3
+; CHECK-UNALIGNED-RV32-NEXT: or a0, a1, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a0, a2, a0
+; CHECK-UNALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-UNALIGNED-RV32-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_31:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a3, 0(a1)
+; CHECK-UNALIGNED-RV64-NEXT: ld a4, 8(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a5, 8(a1)
+; CHECK-UNALIGNED-RV64-NEXT: ld a6, 16(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a7, 16(a1)
+; CHECK-UNALIGNED-RV64-NEXT: ld a0, 23(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a1, 23(a1)
+; CHECK-UNALIGNED-RV64-NEXT: xor a2, a2, a3
+; CHECK-UNALIGNED-RV64-NEXT: xor a4, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: xor a3, a6, a7
+; CHECK-UNALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
+; CHECK-UNALIGNED-RV64-NEXT: or a0, a3, a0
+; CHECK-UNALIGNED-RV64-NEXT: or a0, a2, a0
+; CHECK-UNALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-UNALIGNED-RV64-NEXT: ret
+entry:
+ %memcmp = call i32 @memcmp(i8* %s1, i8* %s2, iXLen 31)
+ %ret = icmp eq i32 %memcmp, 0
+ ret i1 %ret
+}
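
For reference, the expansion enabled above turns these small fixed-size bcmp/memcmp
calls into straight-line loads and compares, and AllowOverlappingLoads is what lets
odd sizes avoid extra tail loads. Below is a minimal C++ sketch (not part of the
patch; the helper name is illustrative) of the overlapping-load strategy visible in
the CHECK-UNALIGNED-RV64 output for bcmp_size_15: two 8-byte loads per operand, the
second starting at offset 7 so bytes 7..14 overlap byte 7 instead of requiring
smaller 4/2/1-byte tail loads.

#include <cstdint>
#include <cstring>

// Equality-only compare of 15 bytes using two overlapping 8-byte loads,
// mirroring the ld/xor/or/seqz sequence in the RV64 unaligned output above.
static bool bytesEqual15(const unsigned char *s1, const unsigned char *s2) {
  uint64_t a0, b0, a1, b1;
  std::memcpy(&a0, s1, 8);      // bytes 0..7
  std::memcpy(&b0, s2, 8);
  std::memcpy(&a1, s1 + 7, 8);  // bytes 7..14, overlapping byte 7
  std::memcpy(&b1, s2 + 7, 8);
  return ((a0 ^ b0) | (a1 ^ b1)) == 0;
}

The same shape appears at size 31 on RV64 (loads at offsets 0, 8, 16, and 23) and
at sizes 15/31 on RV32 with 4-byte loads; without unaligned scalar access the
expansion instead byte-loads and reassembles words, as in the CHECK-ALIGNED output.
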
>From 2caea13ab7795c32476a7102028b9ac0a3ebf9b6 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Fri, 6 Sep 2024 18:07:49 +0800
Subject: [PATCH 2/7] Fix copy-paste mistake
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index ad532aadc83266..ae5ebf16dc6da0 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -2119,7 +2119,7 @@ RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
TTI::MemCmpExpansionOptions Options;
// FIXME: Vectors haven't been tested.
Options.AllowOverlappingLoads =
- (ST->enableUnalignedScalarMem() || ST->enableUnalignedScalarMem());
+ (ST->enableUnalignedScalarMem() || ST->enableUnalignedVectorMem());
Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
Options.NumLoadsPerBlock = Options.MaxNumLoads;
if (ST->is64Bit())
>From a96e1aaf9a4cbe8e8dd09f4f4b1260b5c63541df Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Mon, 9 Sep 2024 21:44:22 +0800
Subject: [PATCH 3/7] Don't add 5/6 to AllowedTailExpansions for RV32
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index ae5ebf16dc6da0..f2fe52bb939e07 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -2125,6 +2125,8 @@ RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
if (ST->is64Bit())
Options.LoadSizes.push_back(8);
llvm::append_range(Options.LoadSizes, ArrayRef({4, 2, 1}));
- Options.AllowedTailExpansions = {3, 5, 6};
+ Options.AllowedTailExpansions = {3};
+ if (ST->is64Bit())
+ llvm::append_range(Options.AllowedTailExpansions, ArrayRef{5, 6});
return Options;
}
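
A rough illustration of what AllowedTailExpansions permits, under the assumption
that a tail of size N is handled by merging sub-loads into one N-byte-wide value
per side (which is why 5- and 6-byte tails want a native 64-bit register and are
restricted to RV64 in this patch; patch 4 then drops the option entirely). The
helper name is illustrative, not from the patch.

#include <cstdint>
#include <cstring>

// Illustrative size-3 tail compare: a 2-byte and a 1-byte load merged into one
// 32-bit value per side, so a single compare decides the whole tail.
static bool tailEqual3(const unsigned char *s1, const unsigned char *s2) {
  uint16_t a, b;
  std::memcpy(&a, s1, 2);
  std::memcpy(&b, s2, 2);
  uint32_t va = (uint32_t(s1[2]) << 16) | a;
  uint32_t vb = (uint32_t(s2[2]) << 16) | b;
  return va == vb;  // a 5- or 6-byte tail would need a 64-bit va/vb
}
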
>From 86b823d7f70d847b140af48cd1c0c7a461897d77 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Mon, 9 Sep 2024 21:52:13 +0800
Subject: [PATCH 4/7] Remove AllowedTailExpansions
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp | 3 ---
1 file changed, 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index f2fe52bb939e07..2ec4483f072d5a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -2125,8 +2125,5 @@ RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
if (ST->is64Bit())
Options.LoadSizes.push_back(8);
llvm::append_range(Options.LoadSizes, ArrayRef({4, 2, 1}));
- Options.AllowedTailExpansions = {3};
- if (ST->is64Bit())
- llvm::append_range(Options.AllowedTailExpansions, ArrayRef{5, 6});
return Options;
}
>From e709f8d43874d8370ec39a29e8e05e6a4f612da0 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Fri, 13 Sep 2024 13:13:09 +0800
Subject: [PATCH 5/7] Explicitly set the LoadSizes
Created using spr 1.3.6-beta.1
---
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 2ec4483f072d5a..9bc262c8a06a33 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -2123,7 +2123,8 @@ RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
Options.NumLoadsPerBlock = Options.MaxNumLoads;
if (ST->is64Bit())
- Options.LoadSizes.push_back(8);
- llvm::append_range(Options.LoadSizes, ArrayRef({4, 2, 1}));
+ Options.LoadSizes = {8, 4, 2, 1};
+ else
+ Options.LoadSizes = {4, 2, 1};
return Options;
}
>From 2fe0d42cbaaea961c181e22d6ade925e76285f03 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 5 Nov 2024 17:25:32 +0800
Subject: [PATCH 6/7] Apply suggestion to AllowOverlappingLoads
Created using spr 1.3.6-beta.1
---
.../Target/RISCV/RISCVTargetTransformInfo.cpp | 4 +-
llvm/test/CodeGen/RISCV/memcmp-optsize.ll | 1192 +++------
llvm/test/CodeGen/RISCV/memcmp.ll | 2198 ++++-------------
3 files changed, 792 insertions(+), 2602 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 5f5a18e2868730..4f16a141c99c0a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -2495,9 +2495,9 @@ bool RISCVTTIImpl::isProfitableToSinkOperands(
RISCVTTIImpl::TTI::MemCmpExpansionOptions
RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
TTI::MemCmpExpansionOptions Options;
- // FIXME: Vectors haven't been tested.
Options.AllowOverlappingLoads =
- (ST->enableUnalignedScalarMem() || ST->enableUnalignedVectorMem());
+ ST->enableUnalignedScalarMem() &&
+ (ST->hasStdExtZbb() || ST->hasStdExtZbkb() || IsZeroCmp);
Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
Options.NumLoadsPerBlock = Options.MaxNumLoads;
if (ST->is64Bit())
diff --git a/llvm/test/CodeGen/RISCV/memcmp-optsize.ll b/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
index 06fb88b02ea4a6..3733f16f5e884f 100644
--- a/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
@@ -7338,49 +7338,42 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV32-NEXT: addi a6, a2, -256
+; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-NEXT: srli a7, a3, 24
+; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-NEXT: and a7, a3, a6
+; CHECK-UNALIGNED-RV32-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a7
; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a2
+; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-NEXT: srli a7, a4, 24
+; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB28_3
; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 3(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 3(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a2
+; CHECK-UNALIGNED-RV32-NEXT: lhu a3, 4(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lhu a4, 4(a1)
+; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -1
+; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a2
+; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a2
; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 6(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 6(a1)
+; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV32-NEXT: ret
; CHECK-UNALIGNED-RV32-NEXT: .LBB28_3: # %res_block
; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a3, a4
@@ -7394,49 +7387,42 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-UNALIGNED-RV64-NEXT: lw a4, 0(a1)
; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-NEXT: addiw a6, a2, -256
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a3, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-NEXT: and a7, a3, a6
+; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV64-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a4, a2
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV64-NEXT: slliw a4, a4, 24
; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB28_3
; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lw a0, 3(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a1, 3(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a2
+; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 4(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lhu a4, 4(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -1
+; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
+; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a2
; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 6(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 6(a1)
+; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-NEXT: ret
; CHECK-UNALIGNED-RV64-NEXT: .LBB28_3: # %res_block
; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
@@ -7546,49 +7532,42 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: addi a6, a2, -256
+; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a3, 24
+; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-V-NEXT: and a7, a3, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a7
; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a2
+; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a4, 24
+; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB28_3
; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 3(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 3(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a2
+; CHECK-UNALIGNED-RV32-V-NEXT: lhu a3, 4(a0)
+; CHECK-UNALIGNED-RV32-V-NEXT: lhu a4, 4(a1)
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -1
+; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a2
+; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a2
; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 6(a0)
+; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 6(a1)
+; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV32-V-NEXT: ret
; CHECK-UNALIGNED-RV32-V-NEXT: .LBB28_3: # %res_block
; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a3, a4
@@ -7602,49 +7581,42 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 0(a1)
; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a6, a2, -256
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a3, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a3, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV64-V-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a4, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV64-V-NEXT: slliw a4, a4, 24
; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB28_3
; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a0, 3(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a1, 3(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 4(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a4, 4(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -1
+; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a2
; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 6(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 6(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-V-NEXT: ret
; CHECK-UNALIGNED-RV64-V-NEXT: .LBB28_3: # %res_block
; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
@@ -9000,209 +8972,113 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 11(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 11(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB30_5: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 15
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a5, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a5, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a4, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a5, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a5, 56
+; CHECK-UNALIGNED-RV64-NEXT: ld a3, 0(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a4, 0(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a2, a3, 24
+; CHECK-UNALIGNED-RV64-NEXT: lui a5, 4080
+; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli a6, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: li a7, 255
+; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a2
+; CHECK-UNALIGNED-RV64-NEXT: srli t0, a3, 40
+; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
+; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -256
+; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a2
+; CHECK-UNALIGNED-RV64-NEXT: srli t1, a3, 56
; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a5, a2
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
+; CHECK-UNALIGNED-RV64-NEXT: and t0, a3, a5
; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a5, 24
+; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a3, 24
; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a5, a3
+; CHECK-UNALIGNED-RV64-NEXT: and t1, a3, a2
; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 56
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, t1
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, t0
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-NEXT: srli a6, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a7, t0, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a7, a6
+; CHECK-UNALIGNED-RV64-NEXT: srli a7, a4, 40
; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
+; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 56
; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a5, a6, .LBB30_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 7(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 7(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
+; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a3
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-NEXT: and a7, a4, a2
; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
+; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 56
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 32
+; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
+; CHECK-UNALIGNED-RV64-NEXT: lw a3, 8(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lw a4, 8(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
+; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a3, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a3, a2
+; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
+; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: and a2, a4, a2
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a4, a2
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a5
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 32
+; CHECK-UNALIGNED-RV64-NEXT: srli a3, a3, 32
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 32
+; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 32
+; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV64-NEXT: lhu a2, 12(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 12(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a5, a6, .LBB30_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-NEXT: srli a4, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a3, a4
+; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
+; CHECK-UNALIGNED-RV64-NEXT: addiw a5, a3, -1
+; CHECK-UNALIGNED-RV64-NEXT: and a3, a2, a5
+; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
+; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 14(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 14(a1)
+; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB30_3: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: .LBB30_4: # %res_block
+; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
; CHECK-UNALIGNED-RV64-NEXT: ret
@@ -9321,209 +9197,113 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 11(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 11(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB30_5: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 15
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a5, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a5, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a4, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a5, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a5, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: ld a3, 0(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: ld a4, 0(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a3, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a5, 4080
+; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: li a7, 255
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a3, 40
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -256
+; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a3, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a5, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
+; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a3, a5
; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a5, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a3, 24
; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a5, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a3, a2
; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, t1
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, t0
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a7, t0, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a7, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a4, 40
; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a5, a6, .LBB30_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 7(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 7(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a4, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a4, a2
; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
+; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 8(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 8(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a3, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a3, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a4, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a4, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a3, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a2, 12(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 12(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a5, a6, .LBB30_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a3, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a5, a3, -1
+; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a2, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 14(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 14(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB30_3: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: .LBB30_4: # %res_block
+; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
; CHECK-UNALIGNED-RV64-V-NEXT: ret
@@ -11078,206 +10858,12 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_31:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 23(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 23(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB32_5: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 31
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV64-ZBB-LABEL: memcmp_size_31:
@@ -11350,206 +10936,12 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_31:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 23(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 23(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB32_5: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 31
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 31)
diff --git a/llvm/test/CodeGen/RISCV/memcmp.ll b/llvm/test/CodeGen/RISCV/memcmp.ll
index 9fa69ab55158c6..00893a8dbe0f7e 100644
--- a/llvm/test/CodeGen/RISCV/memcmp.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp.ll
@@ -10950,49 +10950,42 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV32-NEXT: addi a6, a2, -256
+; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-NEXT: srli a7, a3, 24
+; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-NEXT: and a7, a3, a6
+; CHECK-UNALIGNED-RV32-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a7
; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a2
+; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-NEXT: srli a7, a4, 24
+; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB28_3
; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 3(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 3(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a2
+; CHECK-UNALIGNED-RV32-NEXT: lhu a3, 4(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lhu a4, 4(a1)
+; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -1
+; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a2
+; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a2
; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 6(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 6(a1)
+; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV32-NEXT: ret
; CHECK-UNALIGNED-RV32-NEXT: .LBB28_3: # %res_block
; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a3, a4
@@ -11006,49 +10999,42 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-NEXT: lw a4, 0(a1)
; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-NEXT: addiw a6, a2, -256
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a3, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-NEXT: and a7, a3, a6
+; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV64-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a4, a2
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV64-NEXT: slliw a4, a4, 24
; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB28_3
; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lw a0, 3(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a1, 3(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a2
+; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 4(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lhu a4, 4(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -1
+; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
+; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a2
; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 6(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 6(a1)
+; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-NEXT: ret
; CHECK-UNALIGNED-RV64-NEXT: .LBB28_3: # %res_block
; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
@@ -11158,49 +11144,42 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: addi a6, a2, -256
+; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a3, 24
+; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-V-NEXT: and a7, a3, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a7
; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a2
+; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a4, 24
+; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB28_3
; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 3(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 3(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a2
+; CHECK-UNALIGNED-RV32-V-NEXT: lhu a3, 4(a0)
+; CHECK-UNALIGNED-RV32-V-NEXT: lhu a4, 4(a1)
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -1
+; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a2
+; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a2
; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 6(a0)
+; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 6(a1)
+; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV32-V-NEXT: ret
; CHECK-UNALIGNED-RV32-V-NEXT: .LBB28_3: # %res_block
; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a3, a4
@@ -11214,49 +11193,42 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 0(a1)
; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a6, a2, -256
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a3, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a3, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV64-V-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a4, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV64-V-NEXT: slliw a4, a4, 24
; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB28_3
; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a0, 3(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a1, 3(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 4(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a4, 4(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -1
+; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a2
; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 6(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 6(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-V-NEXT: ret
; CHECK-UNALIGNED-RV64-V-NEXT: .LBB28_3: # %res_block
; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
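
The lhu/lbu tails in the memcmp_size_7 hunks above come from the non-overlapping
4 + 2 + 1 split of the 7-byte compare (the AllowedTailExpansions case), and the
srli/slli/or sequences are byte swaps that normalize each chunk to big-endian so
an unsigned compare gives memcmp's lexicographic order. A minimal C++ sketch of
the chain these checks encode; the function name and the -1/+1 folding are
illustrative only, not taken from the generated code:

  #include <cstdint>
  #include <cstring>

  // Shape of the expanded memcmp(s1, s2, 7): a 4-byte word, a 2-byte
  // tail, and a 1-byte tail, each byte-swapped before comparing.
  int memcmp7_expanded(const void *s1, const void *s2) {
    uint32_t a, b;
    std::memcpy(&a, s1, 4);
    std::memcpy(&b, s2, 4);
    a = __builtin_bswap32(a);                  // the srli/slli/or dance
    b = __builtin_bswap32(b);
    if (a != b)                                // bne ..., res_block
      return a < b ? -1 : 1;
    uint16_t c = 0, d = 0;
    std::memcpy(&c, (const char *)s1 + 4, 2);  // lhu ..., 4(a0)
    std::memcpy(&d, (const char *)s2 + 4, 2);
    c = __builtin_bswap16(c);
    d = __builtin_bswap16(d);
    if (c != d)
      return c < d ? -1 : 1;
    return (int)((const uint8_t *)s1)[6] -     // loadbb2: lbu + sub
           (int)((const uint8_t *)s2)[6];
  }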
@@ -12976,49 +12948,42 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-NEXT: lw a4, 8(a1)
; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV32-NEXT: addi a6, a3, -256
+; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-NEXT: srli a7, a2, 24
+; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-NEXT: and a7, a2, a6
+; CHECK-UNALIGNED-RV32-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
+; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a7
; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a3
+; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-NEXT: srli a7, a4, 24
+; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
; CHECK-UNALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 11(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 11(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
+; CHECK-UNALIGNED-RV32-NEXT: lhu a2, 12(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lhu a4, 12(a1)
+; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
+; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 8
+; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
+; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -1
+; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
+; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a3
; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV32-NEXT: # %bb.4: # %loadbb4
+; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 14(a0)
+; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 14(a1)
+; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV32-NEXT: ret
; CHECK-UNALIGNED-RV32-NEXT: .LBB30_5: # %res_block
; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a4
@@ -13028,107 +12993,103 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a5, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a5, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a4, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a5, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a5, 56
+; CHECK-UNALIGNED-RV64-NEXT: ld a3, 0(a0)
+; CHECK-UNALIGNED-RV64-NEXT: ld a4, 0(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a2, a3, 24
+; CHECK-UNALIGNED-RV64-NEXT: lui a5, 4080
+; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli a6, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: li a7, 255
+; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a2
+; CHECK-UNALIGNED-RV64-NEXT: srli t0, a3, 40
+; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
+; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -256
+; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a2
+; CHECK-UNALIGNED-RV64-NEXT: srli t1, a3, 56
; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a5, a2
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
+; CHECK-UNALIGNED-RV64-NEXT: and t0, a3, a5
; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a5, 24
+; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a3, 24
; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a5, a3
+; CHECK-UNALIGNED-RV64-NEXT: and t1, a3, a2
; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 56
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, t1
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, t0
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-NEXT: srli a6, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a7, t0, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a7, a6
+; CHECK-UNALIGNED-RV64-NEXT: srli a7, a4, 40
; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
+; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 56
; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a5, a6, .LBB30_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 7(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 7(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
+; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a3
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-NEXT: and a7, a4, a2
; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
+; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 56
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 32
+; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
+; CHECK-UNALIGNED-RV64-NEXT: lw a3, 8(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lw a4, 8(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
+; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a3, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a3, a2
+; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
+; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: and a2, a4, a2
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a4, a2
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a5
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 32
+; CHECK-UNALIGNED-RV64-NEXT: srli a3, a3, 32
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 32
+; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 32
+; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV64-NEXT: lhu a2, 12(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 12(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a5, a6, .LBB30_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-NEXT: srli a4, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a3, a4
+; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
+; CHECK-UNALIGNED-RV64-NEXT: addiw a5, a3, -1
+; CHECK-UNALIGNED-RV64-NEXT: and a3, a2, a5
+; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
+; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 14(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 14(a1)
+; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB30_3: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: .LBB30_4: # %res_block
+; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
; CHECK-UNALIGNED-RV64-NEXT: ret
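
For memcmp_size_15 the split is 8 + 4 + 2 + 1 on RV64 (ld, lw, lhu, lbu at
offsets 0, 8, 12, 14) and 4 + 4 + 4 + 2 + 1 on RV32, matching the LoadSizes
list the TTI hook registers. A hedged sketch of that greedy decomposition; the
helper name and signature are illustrative, not the in-tree algorithm:

  #include <cstdint>
  #include <vector>

  // Greedy, non-overlapping split of a memcmp length over a descending
  // LoadSizes list, as suggested by the check lines above.
  std::vector<unsigned> splitLoads(uint64_t Size,
                                   const std::vector<unsigned> &LoadSizes) {
    std::vector<unsigned> Seq;
    for (unsigned LS : LoadSizes)
      while (Size >= LS) {
        Seq.push_back(LS);
        Size -= LS;
      }
    return Seq;
  }

  // splitLoads(15, {8, 4, 2, 1}) -> {8, 4, 2, 1}     (RV64: ld/lw/lhu/lbu)
  // splitLoads(15, {4, 2, 1})    -> {4, 4, 4, 2, 1}  (RV32)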
@@ -13297,49 +13258,42 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 8(a1)
; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: addi a6, a3, -256
+; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a2, 24
+; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-V-NEXT: and a7, a2, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: slli a7, a7, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a7
; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a3
+; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a4, 24
+; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a6
; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 11(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 11(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
+; CHECK-UNALIGNED-RV32-V-NEXT: lhu a2, 12(a0)
+; CHECK-UNALIGNED-RV32-V-NEXT: lhu a4, 12(a1)
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a1, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -1
+; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
+; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a3
; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.4: # %loadbb4
+; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 14(a0)
+; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 14(a1)
+; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV32-V-NEXT: ret
; CHECK-UNALIGNED-RV32-V-NEXT: .LBB30_5: # %res_block
; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a4
@@ -13349,107 +13303,103 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a5, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a5, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a4, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a5, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a5, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: ld a3, 0(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: ld a4, 0(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a3, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a5, 4080
+; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: li a7, 255
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a3, 40
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -256
+; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a3, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a5, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
+; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a3, a5
; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a5, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a3, 24
; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a5, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a3, a2
; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, t1
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, t0
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a7, t0, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a7, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a4, 40
; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a5, a6, .LBB30_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 7(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 7(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a4, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a4, a2
; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
+; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 8(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 8(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a3, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a3, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a4, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a4, a2
+; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a3, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a2, 12(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 12(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a5, a6, .LBB30_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a3, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a5, a3, -1
+; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a2, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 14(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 14(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB30_3: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: .LBB30_4: # %res_block
+; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
; CHECK-UNALIGNED-RV64-V-NEXT: ret
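
The shared res_block tail (sltu/neg/ori) is a branchless way to materialize
memcmp's +1/-1 result from the big-endian-normalized words. A small sketch of
the idiom:

  #include <cstdint>

  // Reached only when a != b. sltu yields 0 or 1, neg turns that into
  // 0 or -1 (all ones), and ori forces the low bit, giving +1 or -1.
  int resBlock(uint64_t a, uint64_t b) {
    uint64_t lt = a < b;          // sltu a0, a3, a4
    int64_t r = -(int64_t)lt;     // neg  a0, a0
    return (int)(r | 1);          // ori  a0, a0, 1
  }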
@@ -16032,196 +15982,12 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_31:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 8(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 12(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 12(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 16(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 16(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 20(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 20(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.6: # %loadbb6
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 24(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 24(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.7: # %loadbb7
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 27(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 27(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.8:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB32_9: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 31
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
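
Note that RV32 no longer expands memcmp_size_31 at all: with only 4-byte and
smaller loads, 31 bytes takes 7x4 + 2 + 1 = 9 non-overlapping loads per source,
which exceeds the load budget, so the plain `call memcmp` is kept; RV64 still
expands, since 3x8 + 4 + 2 + 1 = 6 loads fit. A sketch of that cutoff, where
the cap of eight is an assumption rather than a value read out of the patch
(the real bound comes from TLI->getMaxExpandSizeMemcmp):

  // Assumed cap of eight loads; the counts come from the same greedy
  // split shown after memcmp_size_15 (RV32: 9 loads for 31 bytes, RV64: 6).
  bool expandsInline(unsigned NumLoads, unsigned MaxNumLoads = 8) {
    return NumLoads <= MaxNumLoads;
  }

  // expandsInline(9) -> false: RV32 falls back to the memcmp libcall
  // expandsInline(6) -> true:  RV64 expands inline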
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_31:
@@ -16275,7 +16041,7 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_5
+; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-NEXT: ld a6, 8(a1)
@@ -16321,108 +16087,104 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_5
+; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
+; CHECK-UNALIGNED-RV64-NEXT: ld a4, 16(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a3, a2, 24
+; CHECK-UNALIGNED-RV64-NEXT: lui a5, 4080
+; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli a6, a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: li a7, 255
+; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a3
; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
+; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
+; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
+; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
+; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a5
; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
+; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a3
; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
+; CHECK-UNALIGNED-RV64-NEXT: srli a6, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a7, t0, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a7, a6
+; CHECK-UNALIGNED-RV64-NEXT: srli a7, a4, 40
; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
+; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 56
; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 23(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 23(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
+; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a4
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-NEXT: and a7, a4, a3
; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
+; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 56
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
+; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV64-NEXT: or a6, a4, a6
+; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
+; CHECK-UNALIGNED-RV64-NEXT: lw a2, 24(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lw a4, 24(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a3
+; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a2, 24
; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 32
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a2, a3
+; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a5
+; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
+; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a3
+; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-NEXT: and a3, a4, a3
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 32
+; CHECK-UNALIGNED-RV64-NEXT: srli a2, a2, 32
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 32
+; CHECK-UNALIGNED-RV64-NEXT: srli a6, a3, 32
+; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.4: # %loadbb4
+; CHECK-UNALIGNED-RV64-NEXT: lhu a2, 28(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 28(a1)
+; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
+; CHECK-UNALIGNED-RV64-NEXT: srli a4, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a4
+; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
+; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -1
+; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a4
+; CHECK-UNALIGNED-RV64-NEXT: and a6, a3, a4
+; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
+; CHECK-UNALIGNED-RV64-NEXT: # %bb.5: # %loadbb5
+; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 30(a0)
+; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 30(a1)
+; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB32_5: # %res_block
+; CHECK-UNALIGNED-RV64-NEXT: .LBB32_6: # %res_block
; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a6
; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
@@ -16614,196 +16376,12 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_31:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 8(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 12(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 12(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 16(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 16(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 20(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 20(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.6: # %loadbb6
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 24(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 24(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.7: # %loadbb7
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 27(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 27(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB32_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.8:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB32_9: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 31
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_31:
@@ -16857,7 +16435,7 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_5
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 8(a0)
; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 8(a1)
@@ -16903,108 +16481,104 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_5
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: ld a4, 16(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a2, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a5, 4080
+; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: li a7, 255
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a3
; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
+; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
+; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a5
; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a3
; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a7, t0, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a7, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a4, 40
; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
+; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 23(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 23(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a4, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a4, a3
; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 56
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
+; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a4, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
+; CHECK-UNALIGNED-RV64-V-NEXT: lw a2, 24(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 24(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a2, 24
; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a2, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a5
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
+; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a4, a3
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a2, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a3, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.4: # %loadbb4
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a2, 28(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 28(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -1
+; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a3, a4
+; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
+; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.5: # %loadbb5
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 30(a0)
+; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 30(a1)
+; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB32_5: # %res_block
+; CHECK-UNALIGNED-RV64-V-NEXT: .LBB32_6: # %res_block
; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a6
; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
@@ -19886,400 +19460,12 @@ define i32 @memcmp_size_63(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_63:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 24(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 24(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 32(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 32(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 40(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 40(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.6: # %loadbb6
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 48(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 48(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.7: # %loadbb7
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 55(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 55(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.8:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB34_9: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 63
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV64-ZBB-LABEL: memcmp_size_63:
@@ -20400,400 +19586,12 @@ define i32 @memcmp_size_63(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_63:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 24(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 24(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 32(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 32(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 40(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 40(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.6: # %loadbb6
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 48(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 48(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.7: # %loadbb7
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 55(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 55(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB34_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.8:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB34_9: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 63
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 63)
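For reference, the deleted res_block above computes the three-way memcmp
result without branches: the two byte-reversed words compare as plain
unsigned integers, and equality was already ruled out by the preceding bne.
A minimal C++ sketch of the sltu/neg/ori sequence (not part of the patch;
the function name is illustrative):

  #include <cstdint>

  int resBlock(uint64_t A, uint64_t B) {
    int LT = A < B; // sltu a0, a2, a6
    int R = -LT;    // neg a0, a0   -> 0 or -1
    return R | 1;   // ori a0, a0, 1 -> -1 or +1
  }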
>From d695c496e0970e22a2af6ee4c067d220b1eed0ce Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Wed, 6 Nov 2024 12:13:45 +0800
Subject: [PATCH 7/7] unaligned only
Created using spr 1.3.6-beta.1
---
.../Target/RISCV/RISCVTargetTransformInfo.cpp | 10 +-
llvm/test/CodeGen/RISCV/memcmp-optsize.ll | 12153 ++-------
llvm/test/CodeGen/RISCV/memcmp.ll | 21429 ++--------------
3 files changed, 3843 insertions(+), 29749 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 4f16a141c99c0a..47f08035e3617c 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -2495,9 +2495,13 @@ bool RISCVTTIImpl::isProfitableToSinkOperands(
RISCVTTIImpl::TTI::MemCmpExpansionOptions
RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
TTI::MemCmpExpansionOptions Options;
- Options.AllowOverlappingLoads =
- ST->enableUnalignedScalarMem() &&
- (ST->hasStdExtZbb() || ST->hasStdExtZbkb() || IsZeroCmp);
+ // TODO: Enable expansion even when unaligned accesses are not supported,
+ // once the issues in ExpandMemcmp are fixed.
+ if (!(ST->enableUnalignedScalarMem() &&
+ (ST->hasStdExtZbb() || ST->hasStdExtZbkb() || IsZeroCmp)))
+ return Options;
+
+ Options.AllowOverlappingLoads = true;
Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
Options.NumLoadsPerBlock = Options.MaxNumLoads;
if (ST->is64Bit())
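The early return above applies the same condition that previously only set
AllowOverlappingLoads, but now it gates the whole expansion. As a sketch (a
restatement of the patch, using the subtarget accessors it already calls;
the helper name is illustrative):

  bool shouldExpandMemCmp(const RISCVSubtarget *ST, bool IsZeroCmp) {
    // Fast unaligned scalar loads are required; Zbb/Zbkb make the byte
    // reversal for three-way memcmp cheap, and zero comparisons
    // (bcmp-style) need no byte reversal at all.
    return ST->enableUnalignedScalarMem() &&
           (ST->hasStdExtZbb() || ST->hasStdExtZbkb() || IsZeroCmp);
  }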
diff --git a/llvm/test/CodeGen/RISCV/memcmp-optsize.ll b/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
index 3733f16f5e884f..e852579c724f8e 100644
--- a/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
@@ -61,13 +61,93 @@ entry:
}
define i32 @bcmp_size_1(ptr %s1, ptr %s2) nounwind optsize {
-; CHECK-LABEL: bcmp_size_1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lbu a0, 0(a0)
-; CHECK-NEXT: lbu a1, 0(a1)
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: snez a0, a0
-; CHECK-NEXT: ret
+; CHECK-ALIGNED-RV32-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-LABEL: bcmp_size_1:
+; CHECK-UNALIGNED: # %bb.0: # %entry
+; CHECK-UNALIGNED-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-NEXT: snez a0, a0
+; CHECK-UNALIGNED-NEXT: ret
entry:
%bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iXLen 1)
ret i32 %bcmp
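With expansion now gated on unaligned access, the aligned configurations
fall back to a plain libcall, which is why they gain the ra spill/restore
around call bcmp. The unaligned configurations still inline the size-1
case; what that expansion computes, as a sketch (illustrative name):

  // bcmp only distinguishes zero from non-zero, so a single xor suffices.
  int bcmpSize1(const unsigned char *S1, const unsigned char *S2) {
    return (*S1 ^ *S2) != 0; // lbu + lbu + xor + snez
  }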
@@ -76,110 +156,82 @@ entry:
define i32 @bcmp_size_2(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_2:
@@ -197,142 +249,82 @@ entry:
define i32 @bcmp_size_3(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_3:
@@ -354,202 +346,82 @@ entry:
define i32 @bcmp_size_4(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_4:
@@ -567,234 +439,82 @@ entry:
define i32 @bcmp_size_5(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_5:
@@ -816,278 +536,82 @@ entry:
define i32 @bcmp_size_6(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a5, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a5, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_6:
@@ -1109,310 +633,82 @@ entry:
define i32 @bcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_7:
@@ -1434,372 +730,82 @@ entry:
define i32 @bcmp_size_8(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a5, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a3, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, t0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_8:
@@ -1899,88 +905,12 @@ define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_15:
@@ -1995,88 +925,12 @@ define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_15:
@@ -2091,72 +945,12 @@ define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_15:
@@ -2171,88 +965,12 @@ define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_15:
@@ -2390,710 +1108,82 @@ entry:
define i32 @bcmp_size_16(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a7, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a7, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a3, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, t1, t2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 8(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 9(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 10(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 11(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a4, a4, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, t2, t3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 12(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 13(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 14(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a7, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a5, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, t2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a5, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a7, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a7, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_16:
@@ -3376,714 +1466,42 @@ define i32 @bcmp_size_32(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 26(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 28(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 29(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 30(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 26(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 27(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 28(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 29(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 30(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 23(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 16(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 17(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 18(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 19(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 22(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 23(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 16(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 17(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 18(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 30(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 31(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 24(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 25(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 26(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 27(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, t1, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 28(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 29(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 30(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 31(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 26(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 27(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, t1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 26(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 28(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 29(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 30(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: bcmp_size_32:
@@ -4296,202 +1714,90 @@ entry:
define i1 @bcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
; CHECK-ALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
; CHECK-ALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
; CHECK-ALIGNED-RV32-ZBB-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
; CHECK-ALIGNED-RV64-ZBB-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
; CHECK-ALIGNED-RV32-ZBKB-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
; CHECK-ALIGNED-RV64-ZBKB-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
; CHECK-ALIGNED-RV32-V-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
; CHECK-ALIGNED-RV64-V-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_eq_zero:
@@ -4508,10 +1814,98 @@ entry:
}
define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
-; CHECK-LABEL: bcmp_lt_zero:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: li a0, 0
-; CHECK-NEXT: ret
+; CHECK-ALIGNED-RV32-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-LABEL: bcmp_lt_zero:
+; CHECK-UNALIGNED: # %bb.0: # %entry
+; CHECK-UNALIGNED-NEXT: li a0, 0
+; CHECK-UNALIGNED-NEXT: ret
entry:
%bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iXLen 4)
%ret = icmp slt i32 %bcmp, 0
@@ -4521,202 +1915,90 @@ entry:
define i1 @bcmp_gt_zero(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_gt_zero:
@@ -4743,12 +2025,153 @@ entry:
}
define i32 @memcmp_size_1(ptr %s1, ptr %s2) nounwind optsize {
-; CHECK-LABEL: memcmp_size_1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lbu a0, 0(a0)
-; CHECK-NEXT: lbu a1, 0(a1)
-; CHECK-NEXT: sub a0, a0, a1
-; CHECK-NEXT: ret
+; CHECK-ALIGNED-RV32-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 1
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 1
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-ZBB-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-ZBKB-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-ZBKB-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 1)
ret i32 %memcmp
@@ -4757,156 +2180,102 @@ entry:
define i32 @memcmp_size_2(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a0, a0, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a1, a1, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a0, a0, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a1, a1, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a0, a0, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a1, a1, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_2:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lhu a0, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a1, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: and a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 2
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_2:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lhu a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: and a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 2
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_2:
@@ -4955,36 +2324,22 @@ define i32 @memcmp_size_2(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_2:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a0, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a1, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: and a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 2
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_2:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: and a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 2
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 2)
@@ -4994,278 +2349,102 @@ entry:
define i32 @memcmp_size_3(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-ALIGNED-RV32-NEXT: addi a3, a3, -1
-; CHECK-ALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: and a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-ALIGNED-RV64-NEXT: addiw a3, a3, -1
-; CHECK-ALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: and a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: addi a3, a3, -1
-; CHECK-ALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: and a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addiw a3, a3, -1
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_3:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lhu a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a3, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB24_2
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 2(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 2(a1)
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB24_2: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 3
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_3:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lhu a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a3, .LBB24_2
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 2(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 2(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB24_2: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 3
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_3:
@@ -5350,54 +2529,22 @@ define i32 @memcmp_size_3(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_3:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a3, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB24_2
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 2(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 2(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB24_2: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 3
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_3:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a3, .LBB24_2
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 2(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 2(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB24_2: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 3
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 3)
@@ -5407,284 +2554,102 @@ entry:
define i32 @memcmp_size_4(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-;
-; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_4:
-; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a2, a0
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_4:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_4:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_4:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_4:
@@ -5737,60 +2702,22 @@ define i32 @memcmp_size_4(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_4:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_4:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
@@ -5800,354 +2727,102 @@ entry:
define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_5:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a5, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a5, a5, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB26_2
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB26_2: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 5
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_5:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a5, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a5, a5, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a3, .LBB26_2
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 4(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 4(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB26_2: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 5
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_5:
@@ -6228,74 +2903,22 @@ define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_5:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a5, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a5, a5, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB26_2
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB26_2: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 5
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_5:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a5, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a5, a5, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a3, .LBB26_2
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 4(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 4(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB26_2: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 5
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 5)
@@ -6305,456 +2928,102 @@ entry:
define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a1
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a1
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_6:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a6, a2, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lhu a0, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a1, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB27_3: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 6
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_6:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a6, a2, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lhu a0, 4(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a1, 4(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB27_3: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 6
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_6:
@@ -6859,96 +3128,22 @@ define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_6:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a6, a2, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a0, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a1, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB27_3: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 6
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_6:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a6, a2, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a0, 4(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a1, 4(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB27_3: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 6
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 6)
@@ -6958,476 +3153,102 @@ entry:
define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_7:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a6, a2, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lhu a3, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a4, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a2
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 6(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 6(a1)
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB28_3: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 7
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_7:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a6, a2, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 4(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a4, 4(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 6(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 6(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB28_3: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 7
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_7:
@@ -7528,100 +3349,22 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_7:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a6, a2, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a3, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a4, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 6(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 6(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB28_3: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 7
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_7:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a6, a2, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 4(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a4, 4(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 6(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 6(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB28_3: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 7
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 7)
@@ -7631,614 +3374,102 @@ entry:
define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB29_3: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, a0, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a0
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-NEXT: srli t2, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: lui t3, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t2, t2, t3
-; CHECK-ALIGNED-RV64-NEXT: srli t4, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: li t5, 255
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-NEXT: or t2, t4, t2
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: or a4, t2, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, t3
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: lui a6, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a6, a6, -256
-; CHECK-ALIGNED-RV64-NEXT: and a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and a3, a3, t3
-; CHECK-ALIGNED-RV64-NEXT: srli a4, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and a4, a4, t5
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, t0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: srliw a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: and a4, t1, t3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: and a4, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: sltu a1, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a1, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB29_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB29_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB29_3: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srli t2, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui t3, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t2, t2, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t4, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li t5, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t4, t2
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lui a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a6, a6, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a3, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli a4, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, a4, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, t0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t1, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a1, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a1, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_8:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB29_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB29_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB29_3: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 8
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_8:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a6, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a6, a6, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_8:
@@ -8309,114 +3540,22 @@ define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_8:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB29_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB29_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB29_3: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_8:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a6, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a6, a6, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 8)
@@ -8437,160 +3576,11 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: or a6, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, t0
-; CHECK-ALIGNED-RV64-NEXT: srli t4, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: lui t5, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-NEXT: srli t6, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: li s0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: and t6, t6, s0
-; CHECK-ALIGNED-RV64-NEXT: or t4, t6, t4
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, t5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: srli a5, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a5, a5, t5
-; CHECK-ALIGNED-RV64-NEXT: srli a6, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, s0
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: and a7, t3, t5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: and a7, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 56
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli t0, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli t0, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: and a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: andi a6, a6, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: and a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: andi a6, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 32
-; CHECK-ALIGNED-RV64-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB30_4: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_15:
@@ -8605,102 +3595,12 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB30_4: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_15:
@@ -8715,90 +3615,12 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB30_4: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_15:
@@ -8814,160 +3636,11 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-V-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli t4, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui t5, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srli t6, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li s0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, t6, s0
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t6, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srli a5, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a5, a5, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, s0
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, t3, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: andi a6, a6, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: andi a6, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB30_4: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_15:
@@ -8982,105 +3655,12 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a3, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a4, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a5, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a7, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a3, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a3, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lw a3, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a3, 32
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 32
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 32
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: lhu a2, 12(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 12(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a5, a3, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 14(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 14(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB30_4: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 15
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_15:
@@ -9207,105 +3787,12 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a3, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a4, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a5, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a7, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a3, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a3, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a3, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a2, 12(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 12(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a5, a3, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 14(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 14(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB30_4: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 15
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 15)
@@ -9315,1169 +3802,102 @@ entry:
define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV32-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB31_5: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB31_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and t5, t5, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and t6, t6, a3
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: and a3, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: srliw a3, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: and a2, t4, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: and a3, t2, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB31_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB31_3: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 12(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 13(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 14(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV32-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB31_5: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB31_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB31_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB31_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB31_5: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB31_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB31_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB31_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB31_5: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-V-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB31_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t5, t5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, t6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a3, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, t4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB31_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB31_3: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_16:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 12(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 12(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB31_5: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 16
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
-;
-; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_16:
-; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a5, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a5, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a4, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a5, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a5, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a5, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a5, a6, .LBB31_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a5, a6, .LBB31_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB31_3: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+;
+; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_16:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 16
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_16:
@@ -10594,211 +4014,22 @@ define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_16:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 12(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 12(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB31_5: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 16
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_16:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a5, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a5, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a4, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a5, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a5, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a5, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a5, a6, .LBB31_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a5, a6, .LBB31_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB31_3: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 16)
@@ -10962,1311 +4193,51 @@ define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t6, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu s0, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and s1, s1, a3
-; CHECK-ALIGNED-RV64-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: and a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-NEXT: and a2, t6, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: and a4, t3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: or a6, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 18(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 19(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 20(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 21(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 23(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 17(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 18(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 20(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 21(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 22(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 23(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 25(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 24(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 26(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 28(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 29(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 30(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 24(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 25(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 26(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 27(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 28(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 29(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 30(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and t5, t5, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and t6, t6, a3
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: and a3, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: srliw a3, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: and a2, t4, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: and a3, t2, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV64-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-NEXT: j .LBB33_6
-; CHECK-ALIGNED-RV64-NEXT: .LBB33_5: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV64-NEXT: .LBB33_6: # %endblock
-; CHECK-ALIGNED-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 16(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 20(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 21(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 23(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 16(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 17(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 18(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 19(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 20(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 21(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 22(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 23(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 25(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 24(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 27(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 28(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 29(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 24(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 25(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 26(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 27(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 28(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 29(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 30(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV64-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB33_5: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 20(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 21(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 23(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 16(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 17(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 18(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 20(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 21(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 22(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 23(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 16(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 17(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 18(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 28(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 29(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 31(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 24(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 25(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 26(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 27(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 28(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 29(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 30(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 31(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 24(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 25(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 27(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB33_5: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-V-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t6, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu s0, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and s1, s1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, t6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 18(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 19(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 20(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 21(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 23(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 17(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 18(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 20(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 21(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 22(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 23(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 25(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 24(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 26(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 28(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 29(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 30(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 24(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 25(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 26(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 27(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 28(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 29(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 30(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t5, t5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, t6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a3, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, t4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-V-NEXT: j .LBB33_6
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB33_5: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB33_6: # %endblock
-; CHECK-ALIGNED-RV64-V-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-V-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_32:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 24(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 24(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB33_5: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 32
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV64-ZBB-LABEL: memcmp_size_32:
@@ -12339,206 +4310,12 @@ define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_32:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 24(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 24(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB33_5: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 32)
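For reference, the inline expansion that the removed CHECK lines above encoded compares one XLEN-sized chunk per load block and, on the first mismatch, derives the signed result from an unsigned compare of the byte-reversed words (the `sltu`/`neg`/`ori` sequence in the removed `%res_block`). A minimal C sketch of one such 8-byte step, assuming GCC/Clang builtins; the function name and structure are illustrative only, not what the pass emits:

  #include <stdint.h>
  #include <string.h>

  /* One 8-byte step of the removed expansion. Byte-reversal makes an
     unsigned word compare agree with memcmp's lexicographic (i.e.
     big-endian) ordering on a little-endian target. */
  int memcmp_step8(const unsigned char *s1, const unsigned char *s2) {
    uint64_t a, b;
    memcpy(&a, s1, 8);          /* the lbu/slli/or chains above assemble */
    memcpy(&b, s2, 8);          /* exactly these loads byte by byte      */
    a = __builtin_bswap64(a);   /* done with srli/and/slli/or soup when  */
    b = __builtin_bswap64(b);   /* no rev8 instruction is available      */
    if (a == b)
      return 0;                 /* fall through to the next load block */
    return (a < b) ? -1 : 1;    /* sltu a0,...; neg a0,a0; ori a0,a0,1 */
  }

With the expansion disabled for this size under `optsize`, the updated checks instead materialize the length in a2 and tail off to the libc `memcmp`, which is what the `+` lines assert.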
@@ -12671,200 +4448,200 @@ entry:
define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV32-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV64-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV32-ZBB-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV64-ZBB-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a1)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a1)
; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a0)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a0)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a0)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a4, a1
; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a7, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV32-ZBKB-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a1)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a0)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a5
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV64-ZBKB-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
+; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV32-V-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
+; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV64-V-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV64-V-NEXT: ret
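Because `memcmp_eq_zero` only observes equality, the zero-compare path of the expansion keeps the loaded words in native byte order and reduces them with `xor`/`seqz`; no byte reversal is emitted. A sketch of what the 4-byte case above computes, with an illustrative function name:

  #include <stdint.h>
  #include <string.h>

  /* Equality-only expansion of memcmp(s1, s2, 4) == 0: byte order is
     irrelevant when only equality is tested, so no swap is needed. */
  int memcmp4_is_eq(const void *s1, const void *s2) {
    uint32_t a, b;
    memcpy(&a, s1, 4);    /* four lbu + shifts in the aligned configs,  */
    memcpy(&b, s2, 4);    /* a single lw in the unaligned ones          */
    return (a ^ b) == 0;  /* xor a0, a0, a1; seqz a0, a0 */
  }

Note the updated checks also swap which pointer is loaded first (a1 before a0); the result is unaffected since only equality is tested.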
@@ -12885,264 +4662,112 @@ entry:
define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
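For the signed predicate, the configurations updated here no longer expand inline (the removed lines did loads plus a byte reversal and `sltu`, or `rev8` where Zbb/Zbkb is available); they call the library routine and then test the sign bit of its `i32` result: `srli a0, a0, 31` on RV32 and `slti a0, a0, 0` on RV64. In C terms the whole function collapses to the following sketch (illustrative name):

  #include <string.h>

  /* memcmp_lt_zero once expansion is skipped: call the library and
     extract the sign bit of the returned int. */
  int memcmp_lt_zero_sketch(const void *s1, const void *s2) {
    return memcmp(s1, s2, 4) < 0;
  }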
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_lt_zero:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a0, a1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: srli a0, a0, 31
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_lt_zero:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a0, a1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_lt_zero:
@@ -13187,56 +4812,24 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_lt_zero:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a0, a1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a0, 31
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_lt_zero:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a0, a1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
@@ -13247,264 +4840,112 @@ entry:
define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind optsize {
; CHECK-ALIGNED-RV32-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_gt_zero:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: sgtz a0, a0
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_gt_zero:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: sgtz a0, a0
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_gt_zero:
@@ -13549,56 +4990,24 @@ define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind optsize {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_gt_zero:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: sgtz a0, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_gt_zero:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: sgtz a0, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
diff --git a/llvm/test/CodeGen/RISCV/memcmp.ll b/llvm/test/CodeGen/RISCV/memcmp.ll
index 00893a8dbe0f7e..f0c14ccb0d5f23 100644
--- a/llvm/test/CodeGen/RISCV/memcmp.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp.ll
@@ -61,13 +61,93 @@ entry:
}
define i32 @bcmp_size_1(ptr %s1, ptr %s2) nounwind {
-; CHECK-LABEL: bcmp_size_1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lbu a0, 0(a0)
-; CHECK-NEXT: lbu a1, 0(a1)
-; CHECK-NEXT: xor a0, a0, a1
-; CHECK-NEXT: snez a0, a0
-; CHECK-NEXT: ret
+; CHECK-ALIGNED-RV32-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_1:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-LABEL: bcmp_size_1:
+; CHECK-UNALIGNED: # %bb.0: # %entry
+; CHECK-UNALIGNED-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-NEXT: xor a0, a0, a1
+; CHECK-UNALIGNED-NEXT: snez a0, a0
+; CHECK-UNALIGNED-NEXT: ret
entry:
%bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iXLen 1)
ret i32 %bcmp
@@ -76,110 +156,82 @@ entry:
define i32 @bcmp_size_2(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_2:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_2:
@@ -197,142 +249,82 @@ entry:
define i32 @bcmp_size_3(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_3:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_3:
@@ -354,202 +346,82 @@ entry:
define i32 @bcmp_size_4(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_4:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_4:
@@ -567,234 +439,82 @@ entry:
define i32 @bcmp_size_5(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_5:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_5:
@@ -816,278 +536,82 @@ entry:
define i32 @bcmp_size_6(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a5, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a5, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_6:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_6:
@@ -1109,310 +633,82 @@ entry:
define i32 @bcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_7:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_size_7:
@@ -1434,372 +730,82 @@ entry:
define i32 @bcmp_size_8(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a5, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a3, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, t0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_8:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_8:
@@ -1889,646 +895,82 @@ entry:
define i32 @bcmp_size_15(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 12(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 13(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a6, t0, a7
-; CHECK-ALIGNED-RV32-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 15
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 12(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 13(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, t0, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a3, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, t1, t2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 8(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 9(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 10(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 11(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a4, a4, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, t2, t3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 12(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 13(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 12(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 13(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, t0, a7
-; CHECK-ALIGNED-RV32-V-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 15
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_15:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a3, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_15:
@@ -2666,710 +1108,82 @@ entry:
define i32 @bcmp_size_16(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a7, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a7, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a3, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, t1, t2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 8(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 9(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 10(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 11(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a4, a4, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, t2, t3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 12(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 13(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 14(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a7, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a5, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, t2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a5, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a7, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a7, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_16:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_16:
@@ -3517,180 +1331,12 @@ define i32 @bcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_31:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 28(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 29(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 30(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 28(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 29(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 30(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: or a0, a6, a0
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 31
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_31:
@@ -3705,180 +1351,12 @@ define i32 @bcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_31:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 28(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 29(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 30(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 28(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 29(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 30(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t1, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 31
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_31:
@@ -3893,144 +1371,12 @@ define i32 @bcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_31:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 23(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 16(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 17(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 18(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 19(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 22(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 23(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 16(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 17(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 18(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 24(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 25(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 26(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 27(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 26(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 27(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 28(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 29(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 28(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 29(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 30(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 30(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 31
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_31:
@@ -4045,180 +1391,12 @@ define i32 @bcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_31:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 28(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 29(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 30(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 28(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 29(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 30(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a6, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 31
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_31:
@@ -4452,1390 +1630,82 @@ entry:
define i32 @bcmp_size_32(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 12(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 13(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV32-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 16(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 17(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 18(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 19(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 16(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 17(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 18(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV32-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV32-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 20(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 21(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 22(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu t2, 23(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV32-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 20(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t2, 21(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 22(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t3, 23(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV32-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV32-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV32-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 24(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu t2, 25(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor a7, a7, t0
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 26(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu t3, 27(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV32-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV32-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV32-NEXT: lbu t2, 24(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t3, 25(a1)
-; CHECK-ALIGNED-RV32-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 26(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t4, 27(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV32-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV32-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV32-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV32-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV32-NEXT: lbu t2, 28(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu t3, 29(a0)
-; CHECK-ALIGNED-RV32-NEXT: xor t0, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 30(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV32-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 28(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t3, 29(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, t2
-; CHECK-ALIGNED-RV32-NEXT: lbu t2, 30(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-NEXT: or t1, t3, t1
-; CHECK-ALIGNED-RV32-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: or a1, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a0, t0, a0
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: or a0, a1, a0
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 32
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 26(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 28(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 29(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 30(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 12(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 13(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 16(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 17(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 18(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 19(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 16(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 17(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 18(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 20(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 21(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a7, 22(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t2, 23(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 20(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t2, 21(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 22(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t3, 23(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 24(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t2, 25(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a7, a7, t0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t0, 26(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t3, 27(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t2, 24(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t3, 25(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 26(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t4, 27(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t2, 28(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t3, 29(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor t0, t0, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 30(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t1, 28(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t3, 29(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, t2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu t2, 30(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or t1, t3, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a6, a7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, t0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a1, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 26(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 27(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 28(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 29(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 30(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a3, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, t1, t2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 8(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 9(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 10(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 11(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a4, a4, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, t2, t3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 12(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 13(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 14(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t4, 15(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a7, a7, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a5, a5, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, t3, t4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a6, a7, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 16(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 17(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 18(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 19(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 16(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 17(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t4, 18(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t5, 19(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t0, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a6, a6, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a7, t4, t5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t0, t2, t3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a7, t0, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 20(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 21(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 22(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 23(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 20(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t4, 21(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t5, 22(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t6, 23(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t1, t1, t2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a7, a7, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a7, a7, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t0, t5, t6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t1, t3, t4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack t0, t1, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a7, a7, t0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 24(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 25(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 26(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 27(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t4, 24(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t5, 25(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t6, 26(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu s0, 27(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t2, t2, t3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t0, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack t0, t0, t2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t1, t6, s0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t2, t4, t5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack t1, t2, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor t0, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 28(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t2, 29(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t3, 30(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t4, 28(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t5, 29(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t6, 30(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, t3, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t1, t1, t2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, t1, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, t6, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh t1, t4, t5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, t1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a1, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, t0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a1, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 23(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 16(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 17(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 18(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 19(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 22(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 23(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 16(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 17(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 18(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 30(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 31(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 24(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 25(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 26(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 27(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, t1, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 28(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 29(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 30(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 31(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 26(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 27(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, t1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 10(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 12(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 13(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 14(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 12(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 13(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 16(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 17(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 18(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 19(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 16(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 17(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 18(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 20(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 21(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 22(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t2, 23(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 20(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t2, 21(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 22(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t3, 23(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV32-V-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 24(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t2, 25(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor a7, a7, t0
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 26(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t3, 27(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t2, 24(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t3, 25(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 26(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t4, 27(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV32-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV32-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t2, 28(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t3, 29(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: xor t0, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 30(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV32-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 28(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t3, 29(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, t2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t2, 30(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or t1, t3, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, t0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a1, a0
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 32
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_32:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 26(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 28(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 29(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 30(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_32:
@@ -6278,1410 +2148,42 @@ define i32 @bcmp_size_64(ptr %s1, ptr %s2) nounwind {
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_size_64:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 31(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 28(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 29(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 30(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 31(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 32(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 33(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 34(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 35(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 36(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 37(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 38(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 39(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 32(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 33(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 34(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 35(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 36(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 37(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 38(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 39(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 40(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 41(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 42(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 43(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 44(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 45(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 46(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 47(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 40(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 41(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 42(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 43(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 44(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 45(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 46(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 47(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 48(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 49(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 50(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 51(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 52(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 53(a0)
-; CHECK-ALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 54(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 55(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 48(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 49(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 50(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 51(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 52(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 53(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 54(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 55(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: or t2, t5, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, t2, t3
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 56(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 57(a0)
-; CHECK-ALIGNED-RV64-NEXT: xor t0, t0, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 58(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 59(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 60(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 61(a0)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 62(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 63(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, t2
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, t3
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 56(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 57(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 58(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 59(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 60(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 61(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 62(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 63(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t3
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: or a1, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: or a0, t0, a0
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a1, a0
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 64
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_size_64:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 31(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 28(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 29(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 30(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 31(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 32(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 33(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 34(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 35(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 36(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 37(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 38(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 39(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 32(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 33(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 34(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 35(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 36(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 37(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 38(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 39(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 40(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 41(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 42(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 43(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 44(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 45(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 46(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 47(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 40(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 41(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 42(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 43(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 44(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 45(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 46(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t4, 47(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 48(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 49(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a7, a7, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t0, 50(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 51(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 52(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 53(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 54(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t4, 55(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 48(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 49(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 50(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t4, 51(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 52(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t4, 53(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 54(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t5, 55(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t2, t5, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t2, t2, t3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 56(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 57(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor t0, t0, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 58(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t4, 59(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 60(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t4, 61(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 62(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 63(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, t3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 56(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 57(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t1, 58(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t4, 59(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t3, 60(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t4, 61(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu t2, 62(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 63(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, t3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a6, a7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, t0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a1, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 64
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_size_64:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 23(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 16(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 17(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 18(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 19(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 22(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 23(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 16(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 17(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 18(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 30(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 31(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 24(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 25(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 26(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 27(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 28(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 29(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 30(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 31(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t2, t3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 24(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 25(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 26(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 27(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t0, t2, t3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 36(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 37(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 38(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 39(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a6, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t2, t3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 32(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 33(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 34(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 35(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t0, t2, t3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 36(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 37(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 38(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 39(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a6, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t0, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 32(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 33(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 34(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 35(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t0, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 44(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 45(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 46(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 47(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a7, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t0, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t1, 40(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 41(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 42(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 43(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t0, t1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 44(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 45(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 46(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 47(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a7, t0, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t0, t2, t3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t4, t5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 40(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 41(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 42(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 43(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t2, t3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t2, t4, t5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 52(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 53(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 54(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 55(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a7, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t0, t2, t3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t4, t5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t2, 48(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 49(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 50(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 51(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t2, t3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t2, t4, t5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 52(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 53(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 54(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t6, 55(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack t0, t1, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t2, t5, t6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 48(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 49(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 50(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t6, 51(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t2, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t3, t5, t6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 60(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 61(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 62(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t6, 63(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor t0, t0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t2, t5, t6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 56(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 57(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 58(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 59(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t2, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, t5, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 60(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 61(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 62(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t6, 63(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t1, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t2, t5, t6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t3, 56(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t4, 57(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t5, 58(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 59(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh t2, t3, t4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, t5, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, t1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, t0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a1, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 64
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_size_64:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 17(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 22(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 23(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 18(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 20(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 21(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 22(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 24(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 25(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 28(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 29(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 30(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 31(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 28(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 29(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 30(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 31(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 32(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 33(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 34(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 35(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 36(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 37(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 38(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 39(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 32(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 33(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 34(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 35(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 36(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 37(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 38(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 39(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 40(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 41(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 42(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 43(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 44(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 45(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 46(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 47(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 40(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 41(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 42(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 43(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 44(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 45(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 46(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 47(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 48(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 49(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 50(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 51(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t3, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 52(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 53(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 54(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 55(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 48(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 49(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 50(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 51(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 52(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 53(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 54(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 55(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t5, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t2, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 56(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 57(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: xor t0, t0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 58(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 59(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 60(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 61(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 62(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 63(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 56(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 57(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 58(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 59(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t4, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 60(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 61(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 62(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 63(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, t0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a1, a0
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 64
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: bcmp_size_64:
@@ -7908,202 +2410,90 @@ entry:
define i1 @bcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
; CHECK-ALIGNED-RV32-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
; CHECK-ALIGNED-RV64-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
; CHECK-ALIGNED-RV32-ZBB-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
; CHECK-ALIGNED-RV64-ZBB-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
; CHECK-ALIGNED-RV32-ZBKB-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
; CHECK-ALIGNED-RV64-ZBKB-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
; CHECK-ALIGNED-RV32-V-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_eq_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
; CHECK-ALIGNED-RV64-V-NEXT: seqz a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_eq_zero:
@@ -8120,10 +2510,98 @@ entry:
}
define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
-; CHECK-LABEL: bcmp_lt_zero:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: li a0, 0
-; CHECK-NEXT: ret
+; CHECK-ALIGNED-RV32-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: bcmp_lt_zero:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-LABEL: bcmp_lt_zero:
+; CHECK-UNALIGNED: # %bb.0: # %entry
+; CHECK-UNALIGNED-NEXT: li a0, 0
+; CHECK-UNALIGNED-NEXT: ret
entry:
%bcmp = call signext i32 @bcmp(ptr %s1, ptr %s2, iXLen 4)
%ret = icmp slt i32 %bcmp, 0
@@ -8133,202 +2611,90 @@ entry:
define i1 @bcmp_gt_zero(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
-; CHECK-ALIGNED-RV32-V-LABEL: bcmp_gt_zero:
-; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV32-V-LABEL: bcmp_gt_zero:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV32-V-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: bcmp_gt_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: snez a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call bcmp
+; CHECK-ALIGNED-RV64-V-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-LABEL: bcmp_gt_zero:
@@ -8355,12 +2721,153 @@ entry:
}
define i32 @memcmp_size_1(ptr %s1, ptr %s2) nounwind {
-; CHECK-LABEL: memcmp_size_1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lbu a0, 0(a0)
-; CHECK-NEXT: lbu a1, 0(a1)
-; CHECK-NEXT: sub a0, a0, a1
-; CHECK-NEXT: ret
+; CHECK-ALIGNED-RV32-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV32: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 1
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_1:
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 1
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-V-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 1
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 1
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-ZBB-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV64-ZBB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-ZBKB-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV32-ZBKB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV32-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-ZBKB-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV64-ZBKB: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lbu a0, 0(a0)
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: lbu a1, 0(a1)
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-V-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_1:
+; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 1)
ret i32 %memcmp
@@ -8369,156 +2876,102 @@ entry:
define i32 @memcmp_size_2(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a0, a0, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a1, a1, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a0, a0, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a1, a1, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a0, a0, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a1, a1, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 2
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_2:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 2
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_2:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lhu a0, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a1, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: and a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 2
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_2:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lhu a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: and a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 2
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_2:
@@ -8567,36 +3020,22 @@ define i32 @memcmp_size_2(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_2:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a0, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a1, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: and a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 2
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_2:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: and a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 2
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 2)
@@ -8606,278 +3045,102 @@ entry:
define i32 @memcmp_size_3(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-ALIGNED-RV32-NEXT: addi a3, a3, -1
-; CHECK-ALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a5, 16
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: and a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-ALIGNED-RV64-NEXT: addiw a3, a3, -1
-; CHECK-ALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: and a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: addi a3, a3, -1
-; CHECK-ALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a5, 16
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: and a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 3
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_3:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addiw a3, a3, -1
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB24_2
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB24_2: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 3
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_3:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lhu a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a3, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB24_2
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 2(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 2(a1)
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB24_2: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 3
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_3:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lhu a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a3, .LBB24_2
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 2(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 2(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB24_2: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 3
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_3:
@@ -8962,54 +3225,22 @@ define i32 @memcmp_size_3(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_3:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a3, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB24_2
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 2(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 2(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB24_2: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 3
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_3:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a3, .LBB24_2
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 2(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 2(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB24_2: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 3
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 3)
@@ -9019,284 +3250,102 @@ entry:
define i32 @memcmp_size_4(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_4:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_4:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_4:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_4:
@@ -9349,60 +3398,22 @@ define i32 @memcmp_size_4(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_4:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_4:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
@@ -9412,354 +3423,102 @@ entry:
define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_5:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 5
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_5:
-; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB26_2
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB26_2: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 5
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_5:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a5, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a5, a5, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: slli a5, a5, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB26_2
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB26_2: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 5
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_5:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a5, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a5, a5, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a3, .LBB26_2
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 4(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 4(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB26_2: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 5
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_5:
@@ -9840,74 +3599,22 @@ define i32 @memcmp_size_5(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_5:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a5, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a5, a5, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a5, a5, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB26_2
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB26_2: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 5
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_5:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a5, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a5, a5, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a3, .LBB26_2
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 4(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 4(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB26_2: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 5
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 5)
@@ -9917,456 +3624,102 @@ entry:
define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a1
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a1
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 6
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_6:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB27_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB27_3: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 6
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_6:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a6, a2, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lhu a0, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a1, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB27_3: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 6
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_6:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a6, a2, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lhu a0, 4(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a1, 4(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB27_3: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 6
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_6:
@@ -10471,96 +3824,22 @@ define i32 @memcmp_size_6(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_6:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a6, a2, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a0, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a1, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB27_3: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 6
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_6:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a6, a2, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a0, 4(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a1, 4(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB27_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB27_3: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 6
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 6)
@@ -10570,476 +3849,102 @@ entry:
define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 7
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_7:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a5, a5, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: andi a3, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a4, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB28_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB28_3: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 7
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_7:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a6, a2, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lhu a3, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a4, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a2
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 6(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 6(a1)
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB28_3: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 7
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_7:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a6, a2, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 4(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a4, 4(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 6(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 6(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB28_3: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 7
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_7:
@@ -11140,100 +4045,22 @@ define i32 @memcmp_size_7(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_7:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a6, a2, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a3, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a4, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 6(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 6(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB28_3: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 7
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_7:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a6, a2, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a3, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 4(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a4, 4(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB28_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 6(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 6(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB28_3: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 7
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 7)
@@ -11243,614 +4070,102 @@ entry:
define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB29_3: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, a0, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a0
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV64-NEXT: slli t1, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-NEXT: srli t2, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: lui t3, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t2, t2, t3
-; CHECK-ALIGNED-RV64-NEXT: srli t4, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: li t5, 255
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-NEXT: or t2, t4, t2
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-NEXT: or a4, t2, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, t3
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: lui a6, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a6, a6, -256
-; CHECK-ALIGNED-RV64-NEXT: and a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and a3, a3, t3
-; CHECK-ALIGNED-RV64-NEXT: srli a4, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and a4, a4, t5
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, t0, a1
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: srliw a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: and a4, t1, t3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: and a4, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: sltu a1, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a1, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB29_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB29_3: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a2, a1, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a2, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB29_3
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB29_3: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 8
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_8:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t1
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srli t2, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui t3, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t2, t2, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t4, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li t5, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t4, t2
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lui a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a6, a6, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a3, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli a4, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, a4, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, t0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t1, t3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a1, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a1, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 8
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_8:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a2, a2, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB29_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: bne a3, a4, .LBB29_3
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB29_3: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 8
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_8:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a6, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a6, a6, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a6
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 8
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_8:
@@ -11921,114 +4236,22 @@ define i32 @memcmp_size_8(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_8:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a2, a2, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB29_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a3, a4, .LBB29_3
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB29_3: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 8
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_8:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a6, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a6, a6, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a2, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a2, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 8
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 8)
@@ -12038,1060 +4261,102 @@ entry:
define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB30_5: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 15
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: or a6, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, t0
-; CHECK-ALIGNED-RV64-NEXT: srli t4, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: lui t5, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-NEXT: srli t6, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: li s0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: and t6, t6, s0
-; CHECK-ALIGNED-RV64-NEXT: or t4, t6, t4
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, t5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: srli a5, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a5, a5, t5
-; CHECK-ALIGNED-RV64-NEXT: srli a6, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, s0
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: and a7, t3, t5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: and a7, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 56
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli t0, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli t0, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: and a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: andi a6, a6, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: and a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: andi a6, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 32
-; CHECK-ALIGNED-RV64-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB30_4: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 13(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB30_5: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB30_4: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 13(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a3, a3, 16
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB30_5: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB30_4: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB30_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB30_5: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 15
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_15:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-V-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli t4, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui t5, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srli t6, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li s0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, t6, s0
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t6, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srli a5, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a5, a5, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, s0
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, t3, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 15
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: andi a6, a6, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: andi a6, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a3, .LBB30_4
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB30_4: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a6, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a7, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-NEXT: lhu a2, 12(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lhu a4, 12(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -1
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV32-NEXT: lbu a0, 14(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lbu a1, 14(a1)
-; CHECK-UNALIGNED-RV32-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB30_5: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 15
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a3, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a4, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a5, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a7, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a3, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a2, a2, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a3, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: lw a3, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a3, 32
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 32
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 32
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: lhu a2, 12(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 12(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a5, a3, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 14(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 14(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB30_4: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 15
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_15:
@@ -13208,200 +4473,22 @@ define i32 @memcmp_size_15(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a6, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a7, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a7, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a2, 12(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lhu a4, 12(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -1
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB30_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a0, 14(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lbu a1, 14(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB30_5: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 15
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_15:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a3, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a4, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a5, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a7, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a3, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a2, a2, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a3, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a3, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a3, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a2, 12(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 12(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a5, a3, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a3, a4, .LBB30_4
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 14(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 14(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB30_4: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 15
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 15)
@@ -13411,1169 +4498,102 @@ entry:
define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV32-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB31_5: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB31_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and t5, t5, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and t6, t6, a3
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: and a3, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: srliw a3, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: and a2, t4, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: and a3, t2, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB31_3
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-NEXT: ret
-; CHECK-ALIGNED-RV64-NEXT: .LBB31_3: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 12(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 13(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 14(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV32-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB31_5: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB31_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB31_3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB31_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB31_5: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB31_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB31_3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB31_3: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB31_5
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB31_5: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 16
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_16:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-V-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 16
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB31_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t5, t5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, t6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a3, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, t4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB31_3
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-ALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-V-NEXT: ret
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB31_3: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_16:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 12(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 12(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB31_5: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 16
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_16:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a5, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a5, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a4, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a5, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a5, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a5, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a5, a6, .LBB31_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a5, a6, .LBB31_3
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB31_3: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 16
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_16:
@@ -14690,211 +4710,22 @@ define i32 @memcmp_size_16(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_16:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a4, 8(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a4, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 12(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 12(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a4, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a4, .LBB31_5
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB31_5: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 16
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_16:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a5, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a5, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a4, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a5, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a5, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a5, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a5, a6, .LBB31_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a5, a6, .LBB31_3
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB31_3: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 16)
@@ -14915,339 +4746,10 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_31:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t6, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu s0, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and s1, s1, a3
-; CHECK-ALIGNED-RV64-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: and a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-NEXT: and a2, t6, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: and a4, t3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: or a6, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 17(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 16(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a5, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 23(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: or a6, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 16(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 17(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 18(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 20(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 21(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 22(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 23(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t1, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, t0
-; CHECK-ALIGNED-RV64-NEXT: srli t4, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: lui t5, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-NEXT: srli t6, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: li s0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: and t6, t6, s0
-; CHECK-ALIGNED-RV64-NEXT: or t4, t6, t4
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, t5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: srli a2, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a2, a2, t5
-; CHECK-ALIGNED-RV64-NEXT: srli a6, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, s0
-; CHECK-ALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: srliw a6, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-NEXT: and a7, t3, t5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: and a7, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 56
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: or a6, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 25(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 24(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a6, 27(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 26(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb t1, 27(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli t0, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: slli t0, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: and a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: andi a6, a6, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a7, 24
-; CHECK-ALIGNED-RV64-NEXT: and a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: andi a6, t1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 32
-; CHECK-ALIGNED-RV64-NEXT: srli a5, a2, 32
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: srli a6, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 28(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 29(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 28(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 29(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 30(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 30(a1)
-; CHECK-ALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: j .LBB32_7
-; CHECK-ALIGNED-RV64-NEXT: .LBB32_6: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV64-NEXT: .LBB32_7: # %endblock
-; CHECK-ALIGNED-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 31
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
@@ -15263,198 +4765,12 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_31:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 16(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 20(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 21(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 23(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 16(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 17(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 18(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 19(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 20(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 21(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 22(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 23(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 25(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 24(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a5, 27(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 24(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 25(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 26(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a6, 27(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 29(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 28(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 29(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 28(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 30(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 30(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB32_6: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 31
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_31:
@@ -15469,166 +4785,12 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_31:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 20(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 21(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 23(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 16(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 17(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 18(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 20(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 21(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 22(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 23(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 16(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 17(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 18(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 24(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 25(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a5, 27(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 24(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 25(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a7, 27(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a3, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a7, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 29(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 28(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 29(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 28(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a2, a2, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a3, a3, 48
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB32_6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 30(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 30(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB32_6: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 31
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_31:
@@ -15644,339 +4806,10 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_31:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-V-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t6, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu s0, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and s1, s1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, t6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 17(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 16(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 20(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 21(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 23(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 16(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 17(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 18(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 20(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 21(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 22(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 23(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t1, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli t4, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui t5, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t4, t4, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srli t6, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li s0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, t6, s0
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t6, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srli a2, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, a2, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, s0
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a6, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, t3, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 25(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 24(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 26(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a6, 27(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 24(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 25(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 26(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb t1, 27(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: andi a6, a6, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a7, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: andi a6, t1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: srli a5, a2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 28(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 29(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 28(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 29(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB32_6
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 30(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 30(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: j .LBB32_7
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB32_6: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB32_7: # %endblock
-; CHECK-ALIGNED-RV64-V-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-V-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 31
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
@@ -15992,202 +4825,12 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_31:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a4, 16(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a3, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a5, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a7, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a4, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a4, a6
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: lw a2, 24(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a4, 24(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 32
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a2, 32
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a3, 32
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV64-NEXT: lhu a2, 28(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lhu a3, 28(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -1
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV64-NEXT: lbu a0, 30(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lbu a1, 30(a1)
-; CHECK-UNALIGNED-RV64-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB32_6: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 31
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_31:
@@ -16386,202 +5029,12 @@ define i32 @memcmp_size_31(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_31:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a4, 16(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a3, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a5, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a7, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a4, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a4, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a4, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a2, 24(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a4, 24(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a6, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a2, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a3, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a2, 28(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lhu a3, 28(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -1
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB32_6
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a0, 30(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lbu a1, 30(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: sub a0, a0, a1
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB32_6: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 31
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 31)
@@ -16591,2234 +5044,102 @@ entry:
define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 14(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 16(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 17(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 18(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 19(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 20(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 21(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 23(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 20(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 21(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 22(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-NEXT: # %bb.6: # %loadbb6
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 24(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 25(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 27(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t0, 26(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-NEXT: # %bb.7: # %loadbb7
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 28(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 29(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 28(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 29(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 30(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-NEXT: # %bb.8:
-; CHECK-ALIGNED-RV32-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-NEXT: ret
-; CHECK-ALIGNED-RV32-NEXT: .LBB33_9: # %res_block
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 32
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t6, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu s0, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and s1, s1, a3
-; CHECK-ALIGNED-RV64-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: and a3, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-NEXT: and a2, t6, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: and a4, t3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-NEXT: or a6, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 18(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 19(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 20(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 21(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 23(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 17(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 18(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 20(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 21(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 22(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 23(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 25(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 24(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 26(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 28(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 29(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 30(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 24(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 25(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 26(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 27(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 28(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 29(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 30(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and t5, t5, a2
-; CHECK-ALIGNED-RV64-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and t6, t6, a3
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: and a3, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-NEXT: srliw a3, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-NEXT: and a2, t4, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: and a3, t2, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV64-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-NEXT: j .LBB33_6
-; CHECK-ALIGNED-RV64-NEXT: .LBB33_5: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV64-NEXT: .LBB33_6: # %endblock
-; CHECK-ALIGNED-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
-;
-; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_32:
-; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 5(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 4(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 5(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 7(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 8(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 10(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 13(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 12(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 13(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 15(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 16(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 16(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 17(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 18(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 19(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 21(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 20(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 23(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 20(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 21(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 22(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 23(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.6: # %loadbb6
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 25(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 24(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 27(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 24(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a5, 25(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 26(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a6, 27(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.7: # %loadbb7
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 29(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 28(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 28(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 29(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 30(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBB-NEXT: # %bb.8:
-; CHECK-ALIGNED-RV32-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBB-NEXT: .LBB33_9: # %res_block
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
-;
-; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_32:
-; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 16(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 20(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 21(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 23(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 16(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 17(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 18(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 19(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 20(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 21(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 22(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 23(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 25(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 24(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 27(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 28(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 29(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 24(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 25(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 26(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 27(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 28(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 29(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 30(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV64-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB33_5: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+;
+; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_size_32:
+; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_32:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 14(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 16(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 17(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 18(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 19(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 20(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 21(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 23(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 20(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 21(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 22(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.6: # %loadbb6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 24(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 25(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 27(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t0, 26(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a4, a5
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a2, a2, a4
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a3, t0, t1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.7: # %loadbb7
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 28(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 29(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 28(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 29(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 30(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: # %bb.8:
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: .LBB33_9: # %res_block
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 20(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 21(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 23(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 16(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 17(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 18(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 20(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 21(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 22(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 23(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 16(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 17(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 18(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 28(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 29(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 31(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 24(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 25(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 26(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 27(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 28(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 29(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 30(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 31(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 24(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 25(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 26(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 27(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB33_5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB33_5: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 4(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 5(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 6(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 7(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 9(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 9(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 10(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 11(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 12(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 13(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 14(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 15(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 16(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 17(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 17(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 18(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 19(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 20(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 21(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 23(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 20(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 21(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 22(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 23(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.6: # %loadbb6
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 24(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 25(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 27(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 25(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t0, 26(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu t1, 27(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV32-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, t0, t1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a4, a6, a7
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.7: # %loadbb7
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 28(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 29(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 28(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 29(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 30(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a3, a3, a0
-; CHECK-ALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-ALIGNED-RV32-V-NEXT: # %bb.8:
-; CHECK-ALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV32-V-NEXT: ret
-; CHECK-ALIGNED-RV32-V-NEXT: .LBB33_9: # %res_block
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 32
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_32:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-V-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t6, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu s0, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and s1, s1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, t6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 16(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 18(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 19(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 20(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 21(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 23(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 17(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 18(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, t2, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 20(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 21(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 22(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 23(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a2, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a3, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a2, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a3, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a4, a4, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 25(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 24(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 26(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 28(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 29(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 30(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 31(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 24(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 25(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 26(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 27(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 28(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 29(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 30(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 31(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t5, t5, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, t6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a3, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a2, t4, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t2, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: bne a5, a6, .LBB33_5
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.4:
-; CHECK-ALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-V-NEXT: j .LBB33_6
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB33_5: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a5, a6
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB33_6: # %endblock
-; CHECK-ALIGNED-RV64-V-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-V-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 32
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_size_32:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 4(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 8(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 12(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 12(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 16(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 16(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 20(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 20(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.6: # %loadbb6
-; CHECK-UNALIGNED-RV32-NEXT: lw a2, 24(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a3, 24(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.7: # %loadbb7
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 28(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 28(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: srli a3, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a3, a1, a0
-; CHECK-UNALIGNED-RV32-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-NEXT: # %bb.8:
-; CHECK-UNALIGNED-RV32-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-NEXT: ret
-; CHECK-UNALIGNED-RV32-NEXT: .LBB33_9: # %res_block
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 32
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_32:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 24(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 24(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB33_5: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 32
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_size_32:
@@ -19007,400 +5328,22 @@ define i32 @memcmp_size_32(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_size_32:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 4(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 4(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 8(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 8(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 12(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 12(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 16(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 16(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 20(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 20(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.6: # %loadbb6
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a2, 24(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a3, 24(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a2, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a4, a4, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a5, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a6, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: and a6, a3, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a6, a6, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a6
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.7: # %loadbb7
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 28(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 28(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a3, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a3, a1, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: bne a2, a3, .LBB33_9
-; CHECK-UNALIGNED-RV32-V-NEXT: # %bb.8:
-; CHECK-UNALIGNED-RV32-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV32-V-NEXT: ret
-; CHECK-UNALIGNED-RV32-V-NEXT: .LBB33_9: # %res_block
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV32-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 32
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_32:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 24(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 24(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB33_5
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.4:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB33_5: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 32
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 32)
@@ -19577,2592 +5520,86 @@ define i32 @memcmp_size_63(ptr %s1, ptr %s2) nounwind {
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB34_9
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: # %bb.8:
; CHECK-UNALIGNED-RV64-ZBKB-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB34_9: # %res_block
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
-; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
-;
-; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_63:
-; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 63
-; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
-; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-entry:
- %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 63)
- ret i32 %memcmp
-}
-
-define i32 @memcmp_size_64(ptr %s1, ptr %s2) nounwind {
-; CHECK-RV32-LABEL: memcmp_size_64:
-; CHECK-RV32: # %bb.0: # %entry
-; CHECK-RV32-NEXT: addi sp, sp, -16
-; CHECK-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT: li a2, 64
-; CHECK-RV32-NEXT: call memcmp
-; CHECK-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK-RV32-NEXT: addi sp, sp, 16
-; CHECK-RV32-NEXT: ret
-;
-; CHECK-ALIGNED-RV64-LABEL: memcmp_size_64:
-; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 7(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a5, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, t2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 7(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a3, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a4, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a5
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a5
-; CHECK-ALIGNED-RV64-NEXT: lui a5, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a5, a5, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 14(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 9(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 10(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 11(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 12(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t6, 13(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 14(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu s0, 15(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and s1, s1, a4
-; CHECK-ALIGNED-RV64-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: and a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-NEXT: and a3, t6, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: and a5, t3, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: or a6, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 17(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 16(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 20(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 21(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 23(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a5, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 17(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 18(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, t2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 20(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 21(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 22(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 23(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a3, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a4, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a5
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a5
-; CHECK-ALIGNED-RV64-NEXT: lui a5, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a5, a5, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 25(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 24(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 26(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 28(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 29(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 30(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 31(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 25(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 26(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 27(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 28(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t6, 29(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 30(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu s0, 31(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and s1, s1, a4
-; CHECK-ALIGNED-RV64-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: and a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-NEXT: and a3, t6, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: and a5, t3, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: or a6, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 33(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 32(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 34(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 35(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 36(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 37(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 38(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 39(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a5, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 32(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 33(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 34(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 35(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, t2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 36(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 37(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 38(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 39(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a3, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a4, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a5
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a5
-; CHECK-ALIGNED-RV64-NEXT: lui a5, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a5, a5, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 41(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 40(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 42(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 43(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 44(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 45(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 46(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 47(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 40(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 41(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 42(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 43(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 44(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t6, 45(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 46(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu s0, 47(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and s1, s1, a4
-; CHECK-ALIGNED-RV64-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-NEXT: and a4, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-NEXT: and a3, t6, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: and a5, t3, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-NEXT: or a6, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-NEXT: # %bb.6: # %loadbb6
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 49(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 48(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 50(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 51(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 52(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 53(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a7, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 54(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 55(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a5, t0, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-NEXT: or a5, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 48(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 49(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, a3, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 50(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 51(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, t2, a3
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 52(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 53(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t4, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 54(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 55(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a4, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, t2
-; CHECK-ALIGNED-RV64-NEXT: or t2, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: slli a3, t2, 32
-; CHECK-ALIGNED-RV64-NEXT: or t3, a3, t4
-; CHECK-ALIGNED-RV64-NEXT: srli a4, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-ALIGNED-RV64-NEXT: and t6, a4, a3
-; CHECK-ALIGNED-RV64-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: li a4, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-NEXT: and s0, s0, a4
-; CHECK-ALIGNED-RV64-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-NEXT: or t0, t6, a5
-; CHECK-ALIGNED-RV64-NEXT: srliw a5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a5
-; CHECK-ALIGNED-RV64-NEXT: lui a5, 16
-; CHECK-ALIGNED-RV64-NEXT: addi a5, a5, -256
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t3, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and t1, t4, a5
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-NEXT: # %bb.7: # %loadbb7
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 57(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 56(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 58(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 59(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-NEXT: or a6, a6, a2
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-NEXT: lbu t0, 60(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu t1, 61(a0)
-; CHECK-ALIGNED-RV64-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 62(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 63(a0)
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: lbu a0, 56(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t2, 57(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 58(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 59(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-NEXT: lbu t4, 60(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu t5, 61(a1)
-; CHECK-ALIGNED-RV64-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-NEXT: lbu t3, 62(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a1, 63(a1)
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: and t5, t5, a3
-; CHECK-ALIGNED-RV64-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-NEXT: and t6, t6, a4
-; CHECK-ALIGNED-RV64-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: and a6, a6, a3
-; CHECK-ALIGNED-RV64-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-NEXT: and a4, a7, a4
-; CHECK-ALIGNED-RV64-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-NEXT: or a1, a4, a1
-; CHECK-ALIGNED-RV64-NEXT: srliw a4, t4, 24
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-NEXT: and a3, t4, a3
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-NEXT: and a4, t2, a5
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-NEXT: # %bb.8:
-; CHECK-ALIGNED-RV64-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-NEXT: j .LBB35_10
-; CHECK-ALIGNED-RV64-NEXT: .LBB35_9: # %res_block
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV64-NEXT: .LBB35_10: # %endblock
-; CHECK-ALIGNED-RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
-; CHECK-ALIGNED-RV64-NEXT: ret
-;
-; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_64:
-; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 4(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 5(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 7(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 6(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 7(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 9(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 8(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 10(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 11(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 12(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 13(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 8(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 9(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 10(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 11(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 14(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 15(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 17(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 16(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 20(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 21(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 23(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 16(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 17(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 18(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 19(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 20(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 21(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 22(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 23(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 25(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 24(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 26(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 27(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 28(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 29(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 31(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 24(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 25(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 26(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 27(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 28(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 29(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 30(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 31(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 33(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 32(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 34(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 35(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 36(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 37(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 38(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 39(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 32(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 33(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 34(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 35(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 36(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 37(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 38(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 39(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 41(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 40(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 42(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 43(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 44(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 45(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 46(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 47(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 40(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 41(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 42(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 43(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 44(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 45(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 46(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 47(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.6: # %loadbb6
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 49(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 48(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 50(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 51(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 52(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 53(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 54(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 55(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a6, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 48(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 49(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 50(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 51(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a6, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 52(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a6, 53(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 54(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a7, 55(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a7, a7, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a7, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a4, a5
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.7: # %loadbb7
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 57(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 56(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 58(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 59(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 60(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 61(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a4, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 62(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a0, 63(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 56(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 57(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 58(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 59(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a5, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 60(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a5, 61(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 62(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a1, 63(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a5, a5, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBB-NEXT: # %bb.8:
-; CHECK-ALIGNED-RV64-ZBB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBB-NEXT: .LBB35_9: # %res_block
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: .LBB34_9: # %res_block
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: neg a0, a0
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-ZBKB-NEXT: ret
+;
+; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_63:
+; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 63
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-V-NEXT: ret
+entry:
+ %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 63)
+ ret i32 %memcmp
+}
+
+define i32 @memcmp_size_64(ptr %s1, ptr %s2) nounwind {
+; CHECK-RV32-LABEL: memcmp_size_64:
+; CHECK-RV32: # %bb.0: # %entry
+; CHECK-RV32-NEXT: addi sp, sp, -16
+; CHECK-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-RV32-NEXT: li a2, 64
+; CHECK-RV32-NEXT: call memcmp
+; CHECK-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-LABEL: memcmp_size_64:
+; CHECK-ALIGNED-RV64: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 64
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT: ret
+;
+; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_size_64:
+; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 64
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_size_64:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 4(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 5(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 6(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 7(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 4(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 5(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 6(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 7(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 12(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 13(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 14(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 15(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 8(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 9(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 10(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 11(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 12(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 13(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 14(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 15(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 8(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 9(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 10(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 11(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 20(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 21(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 22(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 23(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 16(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 17(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 18(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 19(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 20(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 21(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 22(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 23(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 16(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 17(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 18(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 19(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 28(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 29(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 30(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 31(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 24(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 25(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 26(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 27(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 28(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 29(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 30(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 31(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 24(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 25(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 26(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 27(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 36(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 37(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 38(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 39(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 32(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 33(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 34(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 35(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 36(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 37(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 38(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 39(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 32(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 33(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 34(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 35(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 44(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 45(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 46(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 47(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 40(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 41(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 42(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 43(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 44(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 45(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 46(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 47(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 40(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 41(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 42(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 43(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.6: # %loadbb6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 52(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 53(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 54(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 55(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 48(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 49(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 50(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 51(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 52(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 53(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 54(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 55(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 48(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 49(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 50(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu t0, 51(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a4, a5, a6
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a5, a7, t0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.7: # %loadbb7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 60(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 61(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 62(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 63(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 56(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 57(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 58(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a0, 59(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a0, a6, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 60(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 61(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 62(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a7, 63(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a6, a7
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 56(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 57(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a6, 58(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a1, 59(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a2, a3, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a3, a4, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a1, a6, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: pack a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a2, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a3, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: bne a2, a3, .LBB35_9
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: # %bb.8:
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: .LBB35_9: # %res_block
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: ori a0, a0, 1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 64
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_size_64:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
-; CHECK-ALIGNED-RV64-V-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 4(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 5(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 6(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 7(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 4(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 5(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 6(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 7(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a3, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a4, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lui a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a5, a5, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 9(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 8(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 10(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 11(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 12(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 13(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 14(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 15(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 8(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 9(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 10(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 11(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 12(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t6, 13(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 14(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu s0, 15(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and s1, s1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: and a5, t3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 17(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 16(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 18(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 19(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 20(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 21(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 22(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 23(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 16(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 17(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 18(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 19(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 20(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 21(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 22(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 23(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a3, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a4, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lui a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a5, a5, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 25(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 24(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 26(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 27(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 28(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 29(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 30(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 31(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 24(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 25(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 26(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 27(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 28(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t6, 29(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 30(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu s0, 31(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and s1, s1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: and a5, t3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 33(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 32(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 34(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 35(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 36(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 37(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 38(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 39(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 32(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 33(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 34(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 35(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 36(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 37(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 38(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 39(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a3, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a4, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lui a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a5, a5, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.5: # %loadbb5
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 41(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 40(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 42(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 43(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 44(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 45(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 46(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 47(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 40(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 41(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 42(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 43(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t3, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 44(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t6, 45(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 46(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu s0, 47(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli s0, s0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t6, t5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, t6, t3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and s1, s1, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or s0, s1, s0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, s0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t2, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, t2
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a7, t6, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: and a5, t3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.6: # %loadbb6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 49(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 48(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 50(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 51(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a5, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 52(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 53(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 54(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 55(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, t0, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 48(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 49(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, a3, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 50(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 51(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, t2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 52(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 53(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 54(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 55(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, t2, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, a3, t4
-; CHECK-ALIGNED-RV64-V-NEXT: srli a4, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, a4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli s0, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: li a4, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and s0, s0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t6, s0, t6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a5, t0, a5
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a5
-; CHECK-ALIGNED-RV64-V-NEXT: lui a5, 16
-; CHECK-ALIGNED-RV64-V-NEXT: addi a5, a5, -256
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t0, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t5, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t3, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t4, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.7: # %loadbb7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 57(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 56(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 58(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 59(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a6, a2
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t0, 60(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t1, 61(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a7, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 62(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 63(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, a7, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, t1
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, a0, t0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a0, 56(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t2, 57(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t1, t1, a6
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 58(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 59(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t2, t2, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t3, t4, t3
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t4, 60(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t5, 61(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or t2, t3, t2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu t3, 62(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a1, 63(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t5, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t5, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t5, t3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t5
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, t4
-; CHECK-ALIGNED-RV64-V-NEXT: slli t4, a1, 32
-; CHECK-ALIGNED-RV64-V-NEXT: or t4, t4, t2
-; CHECK-ALIGNED-RV64-V-NEXT: srli t5, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and t5, t5, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli t6, t1, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and t6, t6, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or t5, t6, t5
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a7, t5, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srliw t0, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t0, t0, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and t1, t1, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli t1, t1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or t0, t1, t0
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-ALIGNED-RV64-V-NEXT: srli a6, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: and a6, a6, a3
-; CHECK-ALIGNED-RV64-V-NEXT: srli a7, t4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, a7, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a4, a4, a6
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli t3, t3, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, t3, a1
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a4, a1
-; CHECK-ALIGNED-RV64-V-NEXT: srliw a4, t4, 24
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 32
-; CHECK-ALIGNED-RV64-V-NEXT: and a3, t4, a3
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a3, a3, a4
-; CHECK-ALIGNED-RV64-V-NEXT: and a4, t2, a5
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a6, a0, a1
-; CHECK-ALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-ALIGNED-RV64-V-NEXT: # %bb.8:
-; CHECK-ALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-ALIGNED-RV64-V-NEXT: j .LBB35_10
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB35_9: # %res_block
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-ALIGNED-RV64-V-NEXT: ori a0, a0, 1
-; CHECK-ALIGNED-RV64-V-NEXT: .LBB35_10: # %endblock
-; CHECK-ALIGNED-RV64-V-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
-; CHECK-ALIGNED-RV64-V-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 64
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_size_64:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 24(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 24(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 32(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 32(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 40(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 40(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.6: # %loadbb6
-; CHECK-UNALIGNED-RV64-NEXT: ld a2, 48(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a6, 48(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.7: # %loadbb7
-; CHECK-UNALIGNED-RV64-NEXT: ld a0, 56(a0)
-; CHECK-UNALIGNED-RV64-NEXT: ld a1, 56(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-NEXT: # %bb.8:
-; CHECK-UNALIGNED-RV64-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-NEXT: ret
-; CHECK-UNALIGNED-RV64-NEXT: .LBB35_9: # %res_block
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 64
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV64-ZBB-LABEL: memcmp_size_64:
@@ -22283,400 +5720,12 @@ define i32 @memcmp_size_64(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_size_64:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.1: # %loadbb1
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 8(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 8(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.2: # %loadbb2
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 16(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 16(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.3: # %loadbb3
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 24(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 24(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.4: # %loadbb4
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 32(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 32(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.5: # %loadbb5
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 40(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 40(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a4, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.6: # %loadbb6
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a2, 48(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a6, 48(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a4, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 4080
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a4, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a2, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: li a5, 255
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a2, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a4, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a4, a4, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a2, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a2, a2, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a7, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, t0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t0, a6, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, t0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli t1, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a7, a7, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: and t0, a6, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t0, t0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw t1, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or t0, t0, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: and t1, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli t1, t1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t1
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, t0
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.7: # %loadbb7
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a0, 56(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: ld a1, 56(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a6, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a0, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a6, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a7, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: and a6, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a6, a6, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a7, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a6, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: and a7, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a7, a7, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a0, a0, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a7
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a0, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: and a0, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a6, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a5, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a5, a1, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: and a5, a5, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a6, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a5, a5, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a5, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a5, a5, 32
-; CHECK-UNALIGNED-RV64-V-NEXT: or a3, a3, a5
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 40
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a1, a1, 56
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a6, a1, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: bne a2, a6, .LBB35_9
-; CHECK-UNALIGNED-RV64-V-NEXT: # %bb.8:
-; CHECK-UNALIGNED-RV64-V-NEXT: li a0, 0
-; CHECK-UNALIGNED-RV64-V-NEXT: ret
-; CHECK-UNALIGNED-RV64-V-NEXT: .LBB35_9: # %res_block
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a2, a6
-; CHECK-UNALIGNED-RV64-V-NEXT: neg a0, a0
-; CHECK-UNALIGNED-RV64-V-NEXT: ori a0, a0, 1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 64
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 64)
@@ -22759,200 +5808,200 @@ entry:
define i1 @memcmp_eq_zero(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
+; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV32-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV32-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV32-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV32-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV32-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV32-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV32-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
+; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV64-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV64-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV64-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV64-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV64-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV64-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV64-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV64-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV32-ZBB-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV32-ZBB-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV64-ZBB-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV64-ZBB-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a1)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a1)
; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a0)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a0)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a0)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a4, a1
; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a7, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
; CHECK-ALIGNED-RV32-ZBKB-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV32-ZBKB-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a1)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a1)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a0)
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a5
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
; CHECK-ALIGNED-RV64-ZBKB-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV64-ZBKB-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
+; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV32-V-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV32-V-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV32-V-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV32-V-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV32-V-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_eq_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 1(a1)
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a1)
+; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 8
; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
+; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
+; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a4
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 0(a0)
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 1(a0)
+; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
+; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 2(a0)
+; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
; CHECK-ALIGNED-RV64-V-NEXT: or a3, a4, a3
; CHECK-ALIGNED-RV64-V-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a1, a3
+; CHECK-ALIGNED-RV64-V-NEXT: slli a0, a0, 24
+; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a2
+; CHECK-ALIGNED-RV64-V-NEXT: or a0, a0, a3
; CHECK-ALIGNED-RV64-V-NEXT: xor a0, a0, a1
; CHECK-ALIGNED-RV64-V-NEXT: seqz a0, a0
; CHECK-ALIGNED-RV64-V-NEXT: ret
@@ -22973,264 +6022,112 @@ entry:
define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: srli a0, a0, 31
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_lt_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a0, a1
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_lt_zero:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a0, a1
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: srli a0, a0, 31
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_lt_zero:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a0, a1
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_lt_zero:
@@ -23275,56 +6172,24 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_lt_zero:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a0, a1
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: srli a0, a0, 31
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_lt_zero:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a0, a1
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
@@ -23335,264 +6200,112 @@ entry:
define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind {
; CHECK-ALIGNED-RV32-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV32: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-NEXT: ret
;
; CHECK-ALIGNED-RV64-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV64: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBB-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV32-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV32-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV32-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBB-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-ZBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBB-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV64-ZBB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 1(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lbu a2, 2(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a3, a4, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a2, a2, 16
-; CHECK-ALIGNED-RV64-ZBB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBB-NEXT: or a1, a1, a3
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBB-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-ZBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBB-NEXT: ret
;
; CHECK-ALIGNED-RV32-ZBKB-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV32-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a0, a4, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a0, a2, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a1, a7, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: packh a2, a5, a6
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: pack a1, a2, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV32-ZBKB-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV64-ZBKB-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV64-ZBKB: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a2, a3
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a4, a4, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a0, a0, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a3, 0(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a4, 1(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lbu a5, 2(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a0, a0, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: packh a2, a3, a4
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a5, a5, 16
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: slli a1, a1, 24
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a5
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: or a1, a1, a2
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a0, a0
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a0, a0, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: rev8 a1, a1
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: srli a1, a1, 32
-; CHECK-ALIGNED-RV64-ZBKB-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBKB-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-ZBKB-NEXT: ret
;
; CHECK-ALIGNED-RV32-V-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a0, 3(a0)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: lbu a1, 3(a1)
-; CHECK-ALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a2, a2, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV32-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV32-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV32-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV32-V-NEXT: slli a5, a5, 24
-; CHECK-ALIGNED-RV32-V-NEXT: or a2, a5, a6
-; CHECK-ALIGNED-RV32-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV32-V-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV32-V-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV32-V-NEXT: ret
;
; CHECK-ALIGNED-RV64-V-LABEL: memcmp_gt_zero:
; CHECK-ALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a2, 0(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a3, 1(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a4, 2(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a0, 3(a0)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a5, 0(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a6, 1(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lbu a7, 2(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: lb a1, 3(a1)
-; CHECK-ALIGNED-RV64-V-NEXT: andi a0, a0, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a4, a0
-; CHECK-ALIGNED-RV64-V-NEXT: slli a3, a3, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a2, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a3
-; CHECK-ALIGNED-RV64-V-NEXT: or a0, a2, a0
-; CHECK-ALIGNED-RV64-V-NEXT: andi a1, a1, 255
-; CHECK-ALIGNED-RV64-V-NEXT: slli a7, a7, 8
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a7, a1
-; CHECK-ALIGNED-RV64-V-NEXT: slli a6, a6, 16
-; CHECK-ALIGNED-RV64-V-NEXT: slliw a2, a5, 24
-; CHECK-ALIGNED-RV64-V-NEXT: or a2, a2, a6
-; CHECK-ALIGNED-RV64-V-NEXT: or a1, a2, a1
-; CHECK-ALIGNED-RV64-V-NEXT: sltu a0, a1, a0
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-ALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-ALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-ALIGNED-RV64-V-NEXT: sgtz a0, a0
+; CHECK-ALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-ALIGNED-RV64-V-NEXT: ret
;
; CHECK-UNALIGNED-RV32-LABEL: memcmp_gt_zero:
; CHECK-UNALIGNED-RV32: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-NEXT: sgtz a0, a0
+; CHECK-UNALIGNED-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-NEXT: ret
;
; CHECK-UNALIGNED-RV64-LABEL: memcmp_gt_zero:
; CHECK-UNALIGNED-RV64: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-NEXT: sgtz a0, a0
+; CHECK-UNALIGNED-RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-NEXT: ret
;
; CHECK-UNALIGNED-RV32-ZBB-LABEL: memcmp_gt_zero:
@@ -23637,56 +6350,24 @@ define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind {
;
; CHECK-UNALIGNED-RV32-V-LABEL: memcmp_gt_zero:
; CHECK-UNALIGNED-RV32-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV32-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV32-V-NEXT: addi a3, a3, -256
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a0, a0, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: srli a4, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV32-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV32-V-NEXT: slli a1, a1, 24
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV32-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV32-V-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-V-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV32-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV32-V-NEXT: sgtz a0, a0
+; CHECK-UNALIGNED-RV32-V-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV32-V-NEXT: ret
;
; CHECK-UNALIGNED-RV64-V-LABEL: memcmp_gt_zero:
; CHECK-UNALIGNED-RV64-V: # %bb.0: # %entry
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a0, 0(a0)
-; CHECK-UNALIGNED-RV64-V-NEXT: lw a1, 0(a1)
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a0, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: lui a3, 16
-; CHECK-UNALIGNED-RV64-V-NEXT: addiw a3, a3, -256
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a4, a0, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a4, a4, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a0, a0, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: or a0, a0, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: srli a2, a1, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: and a2, a2, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: srliw a4, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a2, a2, a4
-; CHECK-UNALIGNED-RV64-V-NEXT: and a3, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: slli a3, a3, 8
-; CHECK-UNALIGNED-RV64-V-NEXT: slliw a1, a1, 24
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a3
-; CHECK-UNALIGNED-RV64-V-NEXT: or a1, a1, a2
-; CHECK-UNALIGNED-RV64-V-NEXT: sltu a0, a1, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-V-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-V-NEXT: li a2, 4
+; CHECK-UNALIGNED-RV64-V-NEXT: call memcmp
+; CHECK-UNALIGNED-RV64-V-NEXT: sgtz a0, a0
+; CHECK-UNALIGNED-RV64-V-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-V-NEXT: addi sp, sp, 16
; CHECK-UNALIGNED-RV64-V-NEXT: ret
entry:
%memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
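
Likewise for memcmp_gt_zero, a sketch of the presumed test body (again assuming the standard autogenerated shape; only the call line is visible in the hunk context above):

define i1 @memcmp_gt_zero(ptr %s1, ptr %s2) nounwind {
entry:
  %memcmp = call signext i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
  ; assumed shape: test the signed call result for being positive
  %ret = icmp sgt i32 %memcmp, 0
  ret i1 %ret
}

Here every configuration lowers the compare to sgtz a0, a0, the base-ISA pseudoinstruction for slt a0, zero, a0, so the epilogue needs no Zbb or Zbkb instructions.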