[llvm] 760b172 - [X86] Add additional test coverage for half libcall expansion/promotion

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Wed Aug 28 03:23:01 PDT 2024


Author: Simon Pilgrim
Date: 2024-08-28T11:22:53+01:00
New Revision: 760b172de69940ea0ea81484d042da0ded91e6e2

URL: https://github.com/llvm/llvm-project/commit/760b172de69940ea0ea81484d042da0ded91e6e2
DIFF: https://github.com/llvm/llvm-project/commit/760b172de69940ea0ea81484d042da0ded91e6e2.diff

LOG: [X86] Add additional test coverage for half libcall expansion/promotion

A powi test still needs to be added together with #105775.
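
For reference, a minimal sketch of the kind of powi case that could follow, assuming it mirrors the existing tests in fp16-libcalls.ll; the function name and intrinsic mangling below are illustrative only, and the CHECK lines would be regenerated with update_llc_test_checks.py:

define void @test_half_powi(half %a0, i32 %a1, ptr %p0) nounwind {
  ; Promote the half operand, expand/promote the powi libcall, then store the
  ; truncated half result.
  %res = call half @llvm.powi.f16.i32(half %a0, i32 %a1)
  store half %res, ptr %p0, align 2
  ret void
}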

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fp16-libcalls.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fp16-libcalls.ll b/llvm/test/CodeGen/X86/fp16-libcalls.ll
index db3d031a8fe3fb..933971212f11de 100644
--- a/llvm/test/CodeGen/X86/fp16-libcalls.ll
+++ b/llvm/test/CodeGen/X86/fp16-libcalls.ll
@@ -59,6 +59,49 @@ define void @test_half_ceil(half %a0, ptr %p0) nounwind {
   ret void
 }
 
+define void @test_half_copysign(half %a0, half %a1, ptr %p0) nounwind {
+; F16C-LABEL: test_half_copysign:
+; F16C:       # %bb.0:
+; F16C-NEXT:    vpextrw $0, %xmm1, %eax
+; F16C-NEXT:    andl $32768, %eax # imm = 0x8000
+; F16C-NEXT:    vpextrw $0, %xmm0, %ecx
+; F16C-NEXT:    andl $32767, %ecx # imm = 0x7FFF
+; F16C-NEXT:    orl %eax, %ecx
+; F16C-NEXT:    movw %cx, (%rdi)
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_copysign:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; FP16-NEXT:    vpternlogd $202, %xmm1, %xmm0, %xmm2
+; FP16-NEXT:    vmovsh %xmm2, (%rdi)
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_copysign:
+; X64:       # %bb.0:
+; X64-NEXT:    pextrw $0, %xmm1, %eax
+; X64-NEXT:    andl $32768, %eax # imm = 0x8000
+; X64-NEXT:    pextrw $0, %xmm0, %ecx
+; X64-NEXT:    andl $32767, %ecx # imm = 0x7FFF
+; X64-NEXT:    orl %eax, %ecx
+; X64-NEXT:    movw %cx, (%rdi)
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_copysign:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    andl $32768, %ecx # imm = 0x8000
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    andl $32767, %edx # imm = 0x7FFF
+; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    movw %dx, (%eax)
+; X86-NEXT:    retl
+  %res = call half @llvm.copysign.half(half %a0, half %a1)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
 define void @test_half_cos(half %a0, ptr %p0) nounwind {
 ; F16C-LABEL: test_half_cos:
 ; F16C:       # %bb.0:
@@ -67,37 +110,602 @@ define void @test_half_cos(half %a0, ptr %p0) nounwind {
 ; F16C-NEXT:    vpextrw $0, %xmm0, %eax
 ; F16C-NEXT:    vmovd %eax, %xmm0
 ; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
-; F16C-NEXT:    callq cosf@PLT
+; F16C-NEXT:    callq cosf@PLT
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rbx)
+; F16C-NEXT:    popq %rbx
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_cos:
+; FP16:       # %bb.0:
+; FP16-NEXT:    pushq %rbx
+; FP16-NEXT:    movq %rdi, %rbx
+; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    callq cosf@PLT
+; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rbx)
+; FP16-NEXT:    popq %rbx
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_cos:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq cosf@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_cos:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll cosf
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.cos.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_exp(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_exp:
+; F16C:       # %bb.0:
+; F16C-NEXT:    pushq %rbx
+; F16C-NEXT:    movq %rdi, %rbx
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    callq expf@PLT
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rbx)
+; F16C-NEXT:    popq %rbx
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_exp:
+; FP16:       # %bb.0:
+; FP16-NEXT:    pushq %rbx
+; FP16-NEXT:    movq %rdi, %rbx
+; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    callq expf@PLT
+; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rbx)
+; FP16-NEXT:    popq %rbx
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_exp:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq expf@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_exp:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll expf
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.exp.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_exp2(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_exp2:
+; F16C:       # %bb.0:
+; F16C-NEXT:    pushq %rbx
+; F16C-NEXT:    movq %rdi, %rbx
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    callq exp2f@PLT
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rbx)
+; F16C-NEXT:    popq %rbx
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_exp2:
+; FP16:       # %bb.0:
+; FP16-NEXT:    pushq %rbx
+; FP16-NEXT:    movq %rdi, %rbx
+; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    callq exp2f@PLT
+; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rbx)
+; FP16-NEXT:    popq %rbx
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_exp2:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq exp2f@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_exp2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll exp2f
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.exp2.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_exp10(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_exp10:
+; F16C:       # %bb.0:
+; F16C-NEXT:    pushq %rbx
+; F16C-NEXT:    movq %rdi, %rbx
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    callq exp10f@PLT
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rbx)
+; F16C-NEXT:    popq %rbx
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_exp10:
+; FP16:       # %bb.0:
+; FP16-NEXT:    pushq %rbx
+; FP16-NEXT:    movq %rdi, %rbx
+; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    callq exp10f@PLT
+; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rbx)
+; FP16-NEXT:    popq %rbx
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_exp10:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq exp10f@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_exp10:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll exp10f
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.exp10.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_fabs(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_fabs:
+; F16C:       # %bb.0:
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rdi)
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_fabs:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; FP16-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rdi)
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_fabs:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_fabs:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movd %xmm0, (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.fabs.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_floor(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_floor:
+; F16C:       # %bb.0:
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    vroundss $9, %xmm0, %xmm0, %xmm0
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rdi)
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_floor:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vrndscalesh $9, %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rdi)
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_floor:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq floorf@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_floor:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll floorf
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.floor.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_fma(half %a0, half %a1, half %a2, ptr %p0) nounwind {
+; F16C-LABEL: test_half_fma:
+; F16C:       # %bb.0:
+; F16C-NEXT:    pushq %rbx
+; F16C-NEXT:    movq %rdi, %rbx
+; F16C-NEXT:    vpextrw $0, %xmm2, %eax
+; F16C-NEXT:    vpextrw $0, %xmm1, %ecx
+; F16C-NEXT:    vpextrw $0, %xmm0, %edx
+; F16C-NEXT:    vmovd %edx, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    vmovd %ecx, %xmm1
+; F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
+; F16C-NEXT:    vmovd %eax, %xmm2
+; F16C-NEXT:    vcvtph2ps %xmm2, %xmm2
+; F16C-NEXT:    callq fmaf@PLT
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rbx)
+; F16C-NEXT:    popq %rbx
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_fma:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vfmadd213sh %xmm2, %xmm1, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rdi)
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_fma:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    subq $16, %rsp
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    movss %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movaps %xmm1, %xmm0
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; X64-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; X64-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
+; X64-NEXT:    # xmm1 = mem[0],zero,zero,zero
+; X64-NEXT:    movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Reload
+; X64-NEXT:    # xmm2 = mem[0],zero,zero,zero
+; X64-NEXT:    callq fmaf@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    addq $16, %rsp
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_fma:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $72, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
+; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
+; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
+; X86-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll fmaf
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $72, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.fma.half(half %a0, half %a1, half %a2)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_fneg(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_fneg:
+; F16C:       # %bb.0:
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rdi)
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_fneg:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; FP16-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rdi)
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_fneg:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_fneg:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    movd %xmm0, (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = fneg half %a0
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_log(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_log:
+; F16C:       # %bb.0:
+; F16C-NEXT:    pushq %rbx
+; F16C-NEXT:    movq %rdi, %rbx
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    callq logf@PLT
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rbx)
+; F16C-NEXT:    popq %rbx
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_log:
+; FP16:       # %bb.0:
+; FP16-NEXT:    pushq %rbx
+; FP16-NEXT:    movq %rdi, %rbx
+; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    callq logf@PLT
+; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rbx)
+; FP16-NEXT:    popq %rbx
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_log:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq logf@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_log:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll logf
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.log.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_log2(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_log2:
+; F16C:       # %bb.0:
+; F16C-NEXT:    pushq %rbx
+; F16C-NEXT:    movq %rdi, %rbx
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    callq log2f@PLT
 ; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; F16C-NEXT:    vmovd %xmm0, %eax
 ; F16C-NEXT:    movw %ax, (%rbx)
 ; F16C-NEXT:    popq %rbx
 ; F16C-NEXT:    retq
 ;
-; FP16-LABEL: test_half_cos:
+; FP16-LABEL: test_half_log2:
 ; FP16:       # %bb.0:
 ; FP16-NEXT:    pushq %rbx
 ; FP16-NEXT:    movq %rdi, %rbx
 ; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
-; FP16-NEXT:    callq cosf@PLT
+; FP16-NEXT:    callq log2f@PLT
 ; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
 ; FP16-NEXT:    vmovsh %xmm0, (%rbx)
 ; FP16-NEXT:    popq %rbx
 ; FP16-NEXT:    retq
 ;
-; X64-LABEL: test_half_cos:
+; X64-LABEL: test_half_log2:
 ; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rbx
 ; X64-NEXT:    movq %rdi, %rbx
 ; X64-NEXT:    callq __extendhfsf2@PLT
-; X64-NEXT:    callq cosf@PLT
+; X64-NEXT:    callq log2f@PLT
 ; X64-NEXT:    callq __truncsfhf2@PLT
 ; X64-NEXT:    pextrw $0, %xmm0, %eax
 ; X64-NEXT:    movw %ax, (%rbx)
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    retq
 ;
-; X86-LABEL: test_half_cos:
+; X86-LABEL: test_half_log2:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    subl $8, %esp
@@ -107,7 +715,7 @@ define void @test_half_cos(half %a0, ptr %p0) nounwind {
 ; X86-NEXT:    movw %ax, (%esp)
 ; X86-NEXT:    calll __extendhfsf2
 ; X86-NEXT:    fstps (%esp)
-; X86-NEXT:    calll cosf
+; X86-NEXT:    calll log2f
 ; X86-NEXT:    fstps (%esp)
 ; X86-NEXT:    calll __truncsfhf2
 ; X86-NEXT:    pextrw $0, %xmm0, %eax
@@ -115,43 +723,103 @@ define void @test_half_cos(half %a0, ptr %p0) nounwind {
 ; X86-NEXT:    addl $8, %esp
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
-  %res = call half @llvm.cos.half(half %a0)
+  %res = call half @llvm.log2.half(half %a0)
   store half %res, ptr %p0, align 2
   ret void
 }
 
-define void @test_half_fabs(half %a0, ptr %p0) nounwind {
-; F16C-LABEL: test_half_fabs:
+define void @test_half_log10(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_log10:
 ; F16C:       # %bb.0:
+; F16C-NEXT:    pushq %rbx
+; F16C-NEXT:    movq %rdi, %rbx
 ; F16C-NEXT:    vpextrw $0, %xmm0, %eax
 ; F16C-NEXT:    vmovd %eax, %xmm0
 ; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
-; F16C-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; F16C-NEXT:    callq log10f@PLT
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rbx)
+; F16C-NEXT:    popq %rbx
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_log10:
+; FP16:       # %bb.0:
+; FP16-NEXT:    pushq %rbx
+; FP16-NEXT:    movq %rdi, %rbx
+; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    callq log10f@PLT
+; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rbx)
+; FP16-NEXT:    popq %rbx
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_log10:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq log10f@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_log10:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll log10f
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.log10.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_nearbyint(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_nearbyint:
+; F16C:       # %bb.0:
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    vroundss $12, %xmm0, %xmm0, %xmm0
 ; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; F16C-NEXT:    vmovd %xmm0, %eax
 ; F16C-NEXT:    movw %ax, (%rdi)
 ; F16C-NEXT:    retq
 ;
-; FP16-LABEL: test_half_fabs:
+; FP16-LABEL: test_half_nearbyint:
 ; FP16:       # %bb.0:
-; FP16-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; FP16-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; FP16-NEXT:    vrndscalesh $12, %xmm0, %xmm0, %xmm0
 ; FP16-NEXT:    vmovsh %xmm0, (%rdi)
 ; FP16-NEXT:    retq
 ;
-; X64-LABEL: test_half_fabs:
+; X64-LABEL: test_half_nearbyint:
 ; X64:       # %bb.0:
 ; X64-NEXT:    pushq %rbx
 ; X64-NEXT:    movq %rdi, %rbx
 ; X64-NEXT:    callq __extendhfsf2@PLT
-; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    callq nearbyintf@PLT
 ; X64-NEXT:    callq __truncsfhf2@PLT
 ; X64-NEXT:    pextrw $0, %xmm0, %eax
 ; X64-NEXT:    movw %ax, (%rbx)
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    retq
 ;
-; X86-LABEL: test_half_fabs:
+; X86-LABEL: test_half_nearbyint:
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    subl $8, %esp
@@ -160,17 +828,16 @@ define void @test_half_fabs(half %a0, ptr %p0) nounwind {
 ; X86-NEXT:    pextrw $0, %xmm0, %eax
 ; X86-NEXT:    movw %ax, (%esp)
 ; X86-NEXT:    calll __extendhfsf2
-; X86-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    movd %xmm0, (%esp)
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll nearbyintf
+; X86-NEXT:    fstps (%esp)
 ; X86-NEXT:    calll __truncsfhf2
 ; X86-NEXT:    pextrw $0, %xmm0, %eax
 ; X86-NEXT:    movw %ax, (%esi)
 ; X86-NEXT:    addl $8, %esp
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
-  %res = call half @llvm.fabs.half(half %a0)
+  %res = call half @llvm.nearbyint.half(half %a0)
   store half %res, ptr %p0, align 2
   ret void
 }
@@ -259,6 +926,59 @@ define void @test_half_pow(half %a0, half %a1, ptr %p0) nounwind {
   ret void
 }
 
+define void @test_half_rint(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_rint:
+; F16C:       # %bb.0:
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rdi)
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_rint:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vrndscalesh $4, %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rdi)
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_rint:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq rintf@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_rint:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll rintf
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.rint.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
 define void @test_half_sin(half %a0, ptr %p0) nounwind {
 ; F16C-LABEL: test_half_sin:
 ; F16C:       # %bb.0:
@@ -373,3 +1093,117 @@ define void @test_half_sqrt(half %a0, ptr %p0) nounwind {
   store half %res, ptr %p0, align 2
   ret void
 }
+
+define void @test_half_tan(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_tan:
+; F16C:       # %bb.0:
+; F16C-NEXT:    pushq %rbx
+; F16C-NEXT:    movq %rdi, %rbx
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    callq tanf@PLT
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rbx)
+; F16C-NEXT:    popq %rbx
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_tan:
+; FP16:       # %bb.0:
+; FP16-NEXT:    pushq %rbx
+; FP16-NEXT:    movq %rdi, %rbx
+; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    callq tanf@PLT
+; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rbx)
+; FP16-NEXT:    popq %rbx
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_tan:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq tanf@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_tan:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll tanf
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.tan.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}
+
+define void @test_half_trunc(half %a0, ptr %p0) nounwind {
+; F16C-LABEL: test_half_trunc:
+; F16C:       # %bb.0:
+; F16C-NEXT:    vpextrw $0, %xmm0, %eax
+; F16C-NEXT:    vmovd %eax, %xmm0
+; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT:    vroundss $11, %xmm0, %xmm0, %xmm0
+; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; F16C-NEXT:    vmovd %xmm0, %eax
+; F16C-NEXT:    movw %ax, (%rdi)
+; F16C-NEXT:    retq
+;
+; FP16-LABEL: test_half_trunc:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vrndscalesh $11, %xmm0, %xmm0, %xmm0
+; FP16-NEXT:    vmovsh %xmm0, (%rdi)
+; FP16-NEXT:    retq
+;
+; X64-LABEL: test_half_trunc:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rbx
+; X64-NEXT:    callq __extendhfsf2@PLT
+; X64-NEXT:    callq truncf@PLT
+; X64-NEXT:    callq __truncsfhf2@PLT
+; X64-NEXT:    pextrw $0, %xmm0, %eax
+; X64-NEXT:    movw %ax, (%rbx)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_half_trunc:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esp)
+; X86-NEXT:    calll __extendhfsf2
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll truncf
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll __truncsfhf2
+; X86-NEXT:    pextrw $0, %xmm0, %eax
+; X86-NEXT:    movw %ax, (%esi)
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+  %res = call half @llvm.trunc.half(half %a0)
+  store half %res, ptr %p0, align 2
+  ret void
+}