[llvm] [RISCV] Add tests for memcmp expansion (PR #107824)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Sat Oct 12 00:35:40 PDT 2024


================
@@ -0,0 +1,3661 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -O2  \
+; RUN:   | FileCheck %s --check-prefix=CHECK-ALIGNED-RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -O2  \
+; RUN:   | FileCheck %s --check-prefix=CHECK-ALIGNED-RV64
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zbb,+zbkb -O2  \
+; RUN:   | FileCheck %s --check-prefix=CHECK-ALIGNED-RV32-ZBB-ZBKB
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zbb,+zbkb -O2  \
+; RUN:   | FileCheck %s --check-prefix=CHECK-ALIGNED-RV64-ZBB-ZBKB
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -O2  \
+; RUN:   | FileCheck %s --check-prefixes=CHECK-ALIGNED-RV32,CHECK-ALIGNED-RV32-V
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -O2  \
+; RUN:   | FileCheck %s --check-prefixes=CHECK-ALIGNED-RV64,CHECK-ALIGNED-RV64-V
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -O2 \
+; RUN:   | FileCheck %s --check-prefix=CHECK-UNALIGNED-RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -O2 \
+; RUN:   | FileCheck %s --check-prefix=CHECK-UNALIGNED-RV64
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zbb,+zbkb,+unaligned-scalar-mem -O2 \
+; RUN:   | FileCheck %s --check-prefix=CHECK-UNALIGNED-RV32-ZBB-ZBKB
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zbb,+zbkb,+unaligned-scalar-mem -O2 \
+; RUN:   | FileCheck %s --check-prefix=CHECK-UNALIGNED-RV64-ZBB-ZBKB
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem -O2 \
+; RUN:   | FileCheck %s --check-prefixes=CHECK-UNALIGNED-RV32,CHECK-UNALIGNED-RV32-V
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+unaligned-scalar-mem,+unaligned-vector-mem -O2 \
+; RUN:   | FileCheck %s --check-prefixes=CHECK-UNALIGNED-RV64,CHECK-UNALIGNED-RV64-V
+
+declare i32 @bcmp(ptr, ptr, iXLen) nounwind readonly
+declare i32 @memcmp(ptr, ptr, iXLen) nounwind readonly
+
+define i32 @bcmp_size_0(ptr %s1, ptr %s2) nounwind optsize {
+; CHECK-ALIGNED-RV32-LABEL: bcmp_size_0:
+; CHECK-ALIGNED-RV32:       # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-NEXT:    addi sp, sp, -16
+; CHECK-ALIGNED-RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-NEXT:    li a2, 0
+; CHECK-ALIGNED-RV32-NEXT:    call bcmp
+; CHECK-ALIGNED-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-NEXT:    addi sp, sp, 16
+; CHECK-ALIGNED-RV32-NEXT:    ret
+;
+; CHECK-ALIGNED-RV64-LABEL: bcmp_size_0:
+; CHECK-ALIGNED-RV64:       # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-NEXT:    addi sp, sp, -16
+; CHECK-ALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-NEXT:    li a2, 0
+; CHECK-ALIGNED-RV64-NEXT:    call bcmp
+; CHECK-ALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-NEXT:    addi sp, sp, 16
+; CHECK-ALIGNED-RV64-NEXT:    ret
+;
+; CHECK-ALIGNED-RV32-ZBB-ZBKB-LABEL: bcmp_size_0:
+; CHECK-ALIGNED-RV32-ZBB-ZBKB:       # %bb.0: # %entry
+; CHECK-ALIGNED-RV32-ZBB-ZBKB-NEXT:    addi sp, sp, -16
+; CHECK-ALIGNED-RV32-ZBB-ZBKB-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-ALIGNED-RV32-ZBB-ZBKB-NEXT:    li a2, 0
+; CHECK-ALIGNED-RV32-ZBB-ZBKB-NEXT:    call bcmp
+; CHECK-ALIGNED-RV32-ZBB-ZBKB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-ALIGNED-RV32-ZBB-ZBKB-NEXT:    addi sp, sp, 16
+; CHECK-ALIGNED-RV32-ZBB-ZBKB-NEXT:    ret
+;
+; CHECK-ALIGNED-RV64-ZBB-ZBKB-LABEL: bcmp_size_0:
+; CHECK-ALIGNED-RV64-ZBB-ZBKB:       # %bb.0: # %entry
+; CHECK-ALIGNED-RV64-ZBB-ZBKB-NEXT:    addi sp, sp, -16
+; CHECK-ALIGNED-RV64-ZBB-ZBKB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-ALIGNED-RV64-ZBB-ZBKB-NEXT:    li a2, 0
+; CHECK-ALIGNED-RV64-ZBB-ZBKB-NEXT:    call bcmp
+; CHECK-ALIGNED-RV64-ZBB-ZBKB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-ALIGNED-RV64-ZBB-ZBKB-NEXT:    addi sp, sp, 16
+; CHECK-ALIGNED-RV64-ZBB-ZBKB-NEXT:    ret
+;
+; CHECK-UNALIGNED-RV32-LABEL: bcmp_size_0:
+; CHECK-UNALIGNED-RV32:       # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-NEXT:    addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-NEXT:    li a2, 0
+; CHECK-UNALIGNED-RV32-NEXT:    call bcmp
+; CHECK-UNALIGNED-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-NEXT:    addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-NEXT:    ret
+;
+; CHECK-UNALIGNED-RV64-LABEL: bcmp_size_0:
+; CHECK-UNALIGNED-RV64:       # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-NEXT:    addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-NEXT:    li a2, 0
+; CHECK-UNALIGNED-RV64-NEXT:    call bcmp
+; CHECK-UNALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-NEXT:    addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-NEXT:    ret
+;
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB-LABEL: bcmp_size_0:
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB:       # %bb.0: # %entry
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB-NEXT:    addi sp, sp, -16
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB-NEXT:    li a2, 0
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB-NEXT:    call bcmp
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB-NEXT:    addi sp, sp, 16
+; CHECK-UNALIGNED-RV32-ZBB-ZBKB-NEXT:    ret
+;
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB-LABEL: bcmp_size_0:
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB:       # %bb.0: # %entry
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB-NEXT:    addi sp, sp, -16
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB-NEXT:    li a2, 0
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB-NEXT:    call bcmp
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB-NEXT:    addi sp, sp, 16
+; CHECK-UNALIGNED-RV64-ZBB-ZBKB-NEXT:    ret
+entry:
+  %bcmp = call i32 @bcmp(ptr %s1, ptr %s2, iXLen 0)
----------------
wangpc-pp wrote:

Oh I see, the sext.w instructions are removed after expansion, so I didn't notice this. Added now.

https://github.com/llvm/llvm-project/pull/107824
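
For context, a minimal sketch (purely illustrative; the function name, size constant, and comparison are not taken from the patch) of a test shape that makes the sign extension (sext.w) of the i32 memcmp result observable in the generated code, which is the kind of coverage the comment refers to:

; Hypothetical illustration only -- not part of PR #107824.
define i1 @memcmp_result_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
entry:
  ; The i32 result of memcmp is fed into a signed comparison, so the
  ; sign-extension behavior of the result shows up in RV64 codegen
  ; depending on whether the call is expanded inline or left as a libcall.
  %memcmp = call i32 @memcmp(ptr %s1, ptr %s2, iXLen 4)
  %ret = icmp slt i32 %memcmp, 0
  ret i1 %ret
}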

