[llvm] df97673 - [X86] Add some initial test coverage for half libcall expansion/promotion
Simon Pilgrim via llvm-commits
llvm-commits@lists.llvm.org
Fri Aug 23 10:08:51 PDT 2024
Author: Simon Pilgrim
Date: 2024-08-23T18:08:33+01:00
New Revision: df9767385701b6bb2ff0411ad6b407bcefbfe34c
URL: https://github.com/llvm/llvm-project/commit/df9767385701b6bb2ff0411ad6b407bcefbfe34c
DIFF: https://github.com/llvm/llvm-project/commit/df9767385701b6bb2ff0411ad6b407bcefbfe34c.diff
LOG: [X86] Add some initial test coverage for half libcall expansion/promotion
We can add additional tests in the future, but this is an initial placeholder.
Inspired by #105775
Added:
llvm/test/CodeGen/X86/fp16-libcalls.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/fp16-libcalls.ll b/llvm/test/CodeGen/X86/fp16-libcalls.ll
new file mode 100644
index 00000000000000..db3d031a8fe3fb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/fp16-libcalls.ll
@@ -0,0 +1,375 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu -mattr=+f16c | FileCheck %s --check-prefix=F16C
+; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu -mattr=+avx512fp16 | FileCheck %s --check-prefix=FP16
+; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -enable-legalize-types-checking -mtriple=i686-linux-gnu -mattr=sse2 | FileCheck %s --check-prefix=X86
+
+; Check lowering of half math intrinsics: expanded inline where the target has
+; native support, otherwise promoted to float and lowered through the soft
+; floating point library calls.
+
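+; ceil: expanded inline to vroundss (F16C) / vrndscalesh (FP16); otherwise
+; promoted to float and lowered via __extendhfsf2 / ceilf / __truncsfhf2 calls.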
+define void @test_half_ceil(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_ceil:
+; F16C: # %bb.0:
+; F16C-NEXT: vpextrw $0, %xmm0, %eax
+; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
+; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT: vmovd %xmm0, %eax
+; F16C-NEXT: movw %ax, (%rdi)
+; F16C-NEXT: retq
+;
+; FP16-LABEL: test_half_ceil:
+; FP16: # %bb.0:
+; FP16-NEXT: vrndscalesh $10, %xmm0, %xmm0, %xmm0
+; FP16-NEXT: vmovsh %xmm0, (%rdi)
+; FP16-NEXT: retq
+;
+; X64-LABEL: test_half_ceil:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: callq __extendhfsf2@PLT
+; X64-NEXT: callq ceilf@PLT
+; X64-NEXT: callq __truncsfhf2@PLT
+; X64-NEXT: pextrw $0, %xmm0, %eax
+; X64-NEXT: movw %ax, (%rbx)
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+;
+; X86-LABEL: test_half_ceil:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll ceilf
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esi)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+ %res = call half @llvm.ceil.half(half %a0)
+ store half %res, ptr %p0, align 2
+ ret void
+}
+
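+; cos: no native lowering on any of these targets, so the half value is
+; promoted to float and a cosf libcall is emitted everywhere.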
+define void @test_half_cos(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_cos:
+; F16C: # %bb.0:
+; F16C-NEXT: pushq %rbx
+; F16C-NEXT: movq %rdi, %rbx
+; F16C-NEXT: vpextrw $0, %xmm0, %eax
+; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: callq cosf@PLT
+; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT: vmovd %xmm0, %eax
+; F16C-NEXT: movw %ax, (%rbx)
+; F16C-NEXT: popq %rbx
+; F16C-NEXT: retq
+;
+; FP16-LABEL: test_half_cos:
+; FP16: # %bb.0:
+; FP16-NEXT: pushq %rbx
+; FP16-NEXT: movq %rdi, %rbx
+; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT: callq cosf@PLT
+; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT: vmovsh %xmm0, (%rbx)
+; FP16-NEXT: popq %rbx
+; FP16-NEXT: retq
+;
+; X64-LABEL: test_half_cos:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: callq __extendhfsf2@PLT
+; X64-NEXT: callq cosf@PLT
+; X64-NEXT: callq __truncsfhf2@PLT
+; X64-NEXT: pextrw $0, %xmm0, %eax
+; X64-NEXT: movw %ax, (%rbx)
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+;
+; X86-LABEL: test_half_cos:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll cosf
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esi)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+ %res = call half @llvm.cos.half(half %a0)
+ store half %res, ptr %p0, align 2
+ ret void
+}
+
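+; fabs: expanded inline to a sign-bit mask (vandps/vpand/pand); only the
+; half<->float conversions need libcalls on the non-F16C targets.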
+define void @test_half_fabs(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_fabs:
+; F16C: # %bb.0:
+; F16C-NEXT: vpextrw $0, %xmm0, %eax
+; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT: vmovd %xmm0, %eax
+; F16C-NEXT: movw %ax, (%rdi)
+; F16C-NEXT: retq
+;
+; FP16-LABEL: test_half_fabs:
+; FP16: # %bb.0:
+; FP16-NEXT: vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; FP16-NEXT: vpand %xmm1, %xmm0, %xmm0
+; FP16-NEXT: vmovsh %xmm0, (%rdi)
+; FP16-NEXT: retq
+;
+; X64-LABEL: test_half_fabs:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: callq __extendhfsf2@PLT
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: callq __truncsfhf2@PLT
+; X64-NEXT: pextrw $0, %xmm0, %eax
+; X64-NEXT: movw %ax, (%rbx)
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+;
+; X86-LABEL: test_half_fabs:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: movd %xmm0, (%esp)
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esi)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+ %res = call half @llvm.fabs.half(half %a0)
+ store half %res, ptr %p0, align 2
+ ret void
+}
+
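+; pow: always lowered to the powf libcall; both half operands are extended to
+; float first, which forces a spill of the second argument on the soft-float
+; targets.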
+define void @test_half_pow(half %a0, half %a1, ptr %p0) nounwind {
+; F16C-LABEL: test_half_pow:
+; F16C: # %bb.0:
+; F16C-NEXT: pushq %rbx
+; F16C-NEXT: movq %rdi, %rbx
+; F16C-NEXT: vpextrw $0, %xmm1, %eax
+; F16C-NEXT: vpextrw $0, %xmm0, %ecx
+; F16C-NEXT: vmovd %ecx, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vmovd %eax, %xmm1
+; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; F16C-NEXT: callq powf@PLT
+; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT: vmovd %xmm0, %eax
+; F16C-NEXT: movw %ax, (%rbx)
+; F16C-NEXT: popq %rbx
+; F16C-NEXT: retq
+;
+; FP16-LABEL: test_half_pow:
+; FP16: # %bb.0:
+; FP16-NEXT: pushq %rbx
+; FP16-NEXT: movq %rdi, %rbx
+; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT: vcvtsh2ss %xmm1, %xmm1, %xmm1
+; FP16-NEXT: callq powf@PLT
+; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT: vmovsh %xmm0, (%rbx)
+; FP16-NEXT: popq %rbx
+; FP16-NEXT: retq
+;
+; X64-LABEL: test_half_pow:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: subq $16, %rsp
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: callq __extendhfsf2@PLT
+; X64-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; X64-NEXT: # xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: callq __extendhfsf2@PLT
+; X64-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
+; X64-NEXT: # xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: callq powf@PLT
+; X64-NEXT: callq __truncsfhf2@PLT
+; X64-NEXT: pextrw $0, %xmm0, %eax
+; X64-NEXT: movw %ax, (%rbx)
+; X64-NEXT: addq $16, %rsp
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+;
+; X86-LABEL: test_half_pow:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $56, %esp
+; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
+; X86-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll powf
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esi)
+; X86-NEXT: addl $56, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+ %res = call half @llvm.pow.half(half %a0, half %a1)
+ store half %res, ptr %p0, align 2
+ ret void
+}
+
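+; sin: promoted to float and lowered to a sinf libcall, matching the cos
+; lowering above.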
+define void @test_half_sin(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_sin:
+; F16C: # %bb.0:
+; F16C-NEXT: pushq %rbx
+; F16C-NEXT: movq %rdi, %rbx
+; F16C-NEXT: vpextrw $0, %xmm0, %eax
+; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: callq sinf@PLT
+; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT: vmovd %xmm0, %eax
+; F16C-NEXT: movw %ax, (%rbx)
+; F16C-NEXT: popq %rbx
+; F16C-NEXT: retq
+;
+; FP16-LABEL: test_half_sin:
+; FP16: # %bb.0:
+; FP16-NEXT: pushq %rbx
+; FP16-NEXT: movq %rdi, %rbx
+; FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT: callq sinf@PLT
+; FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT: vmovsh %xmm0, (%rbx)
+; FP16-NEXT: popq %rbx
+; FP16-NEXT: retq
+;
+; X64-LABEL: test_half_sin:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: callq __extendhfsf2@PLT
+; X64-NEXT: callq sinf@PLT
+; X64-NEXT: callq __truncsfhf2@PLT
+; X64-NEXT: pextrw $0, %xmm0, %eax
+; X64-NEXT: movw %ax, (%rbx)
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+;
+; X86-LABEL: test_half_sin:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll sinf
+; X86-NEXT: fstps (%esp)
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esi)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+ %res = call half @llvm.sin.half(half %a0)
+ store half %res, ptr %p0, align 2
+ ret void
+}
+
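+; sqrt: maps to native sqrtss/vsqrtsh instructions; __extendhfsf2 and
+; __truncsfhf2 are only needed where there is no F16C/FP16 support.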
+define void @test_half_sqrt(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_sqrt:
+; F16C: # %bb.0:
+; F16C-NEXT: vpextrw $0, %xmm0, %eax
+; F16C-NEXT: vmovd %eax, %xmm0
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
+; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT: vmovd %xmm0, %eax
+; F16C-NEXT: movw %ax, (%rdi)
+; F16C-NEXT: retq
+;
+; FP16-LABEL: test_half_sqrt:
+; FP16: # %bb.0:
+; FP16-NEXT: vsqrtsh %xmm0, %xmm0, %xmm0
+; FP16-NEXT: vmovsh %xmm0, (%rdi)
+; FP16-NEXT: retq
+;
+; X64-LABEL: test_half_sqrt:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: callq __extendhfsf2@PLT
+; X64-NEXT: sqrtss %xmm0, %xmm0
+; X64-NEXT: callq __truncsfhf2@PLT
+; X64-NEXT: pextrw $0, %xmm0, %eax
+; X64-NEXT: movw %ax, (%rbx)
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+;
+; X86-LABEL: test_half_sqrt:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esp)
+; X86-NEXT: calll __extendhfsf2
+; X86-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: sqrtss %xmm0, %xmm0
+; X86-NEXT: movss %xmm0, (%esp)
+; X86-NEXT: calll __truncsfhf2
+; X86-NEXT: pextrw $0, %xmm0, %eax
+; X86-NEXT: movw %ax, (%esi)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+ %res = call half @llvm.sqrt.half(half %a0)
+ store half %res, ptr %p0, align 2
+ ret void
+}