[llvm] 2bcf51c - X86: call fp16-conversion functions soft-float on Darwin.
Tim Northover via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 10 02:00:09 PST 2022
Author: Tim Northover
Date: 2022-11-10T10:00:01Z
New Revision: 2bcf51c7f82ca7752d1bba390a2e0cb5fdd05ca9
URL: https://github.com/llvm/llvm-project/commit/2bcf51c7f82ca7752d1bba390a2e0cb5fdd05ca9
DIFF: https://github.com/llvm/llvm-project/commit/2bcf51c7f82ca7752d1bba390a2e0cb5fdd05ca9.diff
LOG: X86: call fp16-conversion functions soft-float on Darwin.
We've been shipping implementations of these functions with a soft-float ABI since
macOS 10.10 in 2014, and there's evidence they're already in use by existing
binaries, so we can't easily switch to passing the half value in %xmm0.

This emits special libcalls, with casts in place to restore the soft-float ABI, for
__truncdfhf2, __truncsfhf2, and __extendhfsf2.
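
Concretely, the soft-float ABI means the f16 value crosses these calls as a bare
16-bit integer in a general-purpose register rather than in an SSE register. As a
minimal sketch (illustrative C++ declarations, not part of the patch), the helpers
behave as if declared:

  // Hypothetical declarations showing the soft-float shape of the helpers:
  // the f16 payload travels as an i16; the f32/f64 side uses the normal ABI.
  extern "C" unsigned short __truncsfhf2(float);    // f32 -> raw f16 bits
  extern "C" unsigned short __truncdfhf2(double);   // f64 -> raw f16 bits
  extern "C" float __extendhfsf2(unsigned short);   // raw f16 bits -> f32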
Added:
llvm/test/CodeGen/X86/half-darwin.ll
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/fp-round.ll
llvm/test/CodeGen/X86/fp-roundeven.ll
llvm/test/CodeGen/X86/half-constrained.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2884fee57a1a2..a58430d6776cb 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -631,8 +631,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FSUB, MVT::f16, Promote);
setOperationAction(ISD::FMUL, MVT::f16, Promote);
setOperationAction(ISD::FDIV, MVT::f16, Promote);
- setOperationAction(ISD::FP_ROUND, MVT::f16, LibCall);
- setOperationAction(ISD::FP_EXTEND, MVT::f32, LibCall);
+ setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
+ setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
setOperationAction(ISD::STRICT_FADD, MVT::f16, Promote);
@@ -660,8 +660,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
- setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, LibCall);
- setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, LibCall);
+ setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
+ setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
@@ -23167,8 +23167,39 @@ SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, In));
}
- if (!Subtarget.hasF16C())
- return SDValue();
+ if (!Subtarget.hasF16C()) {
+ if (!Subtarget.getTargetTriple().isOSDarwin())
+ return SDValue();
+
+ assert(VT == MVT::f32 && SVT == MVT::f16 && "unexpected extend libcall");
+
+ // Need a libcall, but ABI for f16 is soft-float on MacOS.
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
+
+ In = DAG.getBitcast(MVT::i16, In);
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Node = In;
+ Entry.Ty = EVT(MVT::i16).getTypeForEVT(*DAG.getContext());
+ Entry.IsSExt = false;
+ Entry.IsZExt = true;
+ Args.push_back(Entry);
+
+ SDValue Callee = DAG.getExternalSymbol(
+ getLibcallName(RTLIB::FPEXT_F16_F32),
+ getPointerTy(DAG.getDataLayout()));
+ CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
+ CallingConv::C, EVT(VT).getTypeForEVT(*DAG.getContext()), Callee,
+ std::move(Args));
+
+ SDValue Res;
+ std::tie(Res, Chain) = LowerCallTo(CLI);
+ if (IsStrict)
+ Res = DAG.getMergeValues({Res, Chain}, DL);
+
+ return Res;
+ }
In = DAG.getBitcast(MVT::i16, In);
In = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i16,
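
For intuition, the Darwin-specific branch above is roughly equivalent to the
following sketch (illustrative only; assumes a compiler with _Float16 support, and
extend_half_to_float is a made-up name): the f16 operand is bitcast to i16, passed
zero-extended, and the f32 result is used directly.

  #include <cstdint>
  #include <cstring>

  extern "C" float __extendhfsf2(unsigned short);  // soft-float helper

  // Rough equivalent of the FP_EXTEND libcall path on Darwin without F16C.
  static float extend_half_to_float(_Float16 h) {
    std::uint16_t bits;
    std::memcpy(&bits, &h, sizeof(bits));  // mirrors DAG.getBitcast(MVT::i16, In)
    return __extendhfsf2(bits);            // half goes out as a zero-extended i16
  }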
@@ -23230,6 +23261,42 @@ SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
if (SVT == MVT::f128 || (VT == MVT::f16 && SVT == MVT::f80))
return SDValue();
+ if (VT == MVT::f16 && (SVT == MVT::f64 || SVT == MVT::f32) &&
+ !Subtarget.hasFP16() && (SVT == MVT::f64 || !Subtarget.hasF16C())) {
+ if (!Subtarget.getTargetTriple().isOSDarwin())
+ return SDValue();
+
+ // We need a libcall but the ABI for f16 libcalls on MacOS is soft.
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
+
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Node = In;
+ Entry.Ty = EVT(SVT).getTypeForEVT(*DAG.getContext());
+ Entry.IsSExt = false;
+ Entry.IsZExt = true;
+ Args.push_back(Entry);
+
+ SDValue Callee = DAG.getExternalSymbol(
+ getLibcallName(SVT == MVT::f64 ? RTLIB::FPROUND_F64_F16
+ : RTLIB::FPROUND_F32_F16),
+ getPointerTy(DAG.getDataLayout()));
+ CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
+ CallingConv::C, EVT(MVT::i16).getTypeForEVT(*DAG.getContext()), Callee,
+ std::move(Args));
+
+ SDValue Res;
+ std::tie(Res, Chain) = LowerCallTo(CLI);
+
+ Res = DAG.getBitcast(MVT::f16, Res);
+
+ if (IsStrict)
+ Res = DAG.getMergeValues({Res, Chain}, DL);
+
+ return Res;
+ }
+
if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
return SDValue();
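
Similarly, the FP_ROUND path above amounts to the following sketch (illustrative
only; trunc_float_to_half is a made-up name): the libcall returns the raw f16 bits
in a general-purpose register and the lowering bitcasts them back to f16.

  #include <cstdint>
  #include <cstring>

  extern "C" unsigned short __truncsfhf2(float);   // soft-float helper

  // Rough equivalent of the FP_ROUND libcall path on Darwin without F16C or
  // AVX512-FP16; __truncdfhf2 is used the same way for f64 sources.
  static _Float16 trunc_float_to_half(float f) {
    std::uint16_t bits = __truncsfhf2(f);  // result arrives as an i16
    _Float16 h;
    std::memcpy(&h, &bits, sizeof(h));     // mirrors DAG.getBitcast(MVT::f16, Res)
    return h;
  }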
diff --git a/llvm/test/CodeGen/X86/fp-round.ll b/llvm/test/CodeGen/X86/fp-round.ll
index 9218996071ba2..e8f3f069d01b3 100644
--- a/llvm/test/CodeGen/X86/fp-round.ll
+++ b/llvm/test/CodeGen/X86/fp-round.ll
@@ -1,52 +1,55 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefixes=AVX1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512F
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512FP16
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-freebsd -mattr=+avx | FileCheck %s --check-prefixes=AVX1
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512F
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX512FP16
define half @round_f16(half %h) {
; SSE2-LABEL: round_f16:
-; SSE2: ## %bb.0: ## %entry
+; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: .cfi_def_cfa_offset 16
-; SSE2-NEXT: callq ___extendhfsf2
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: callq ___truncsfhf2
+; SSE2-NEXT: callq __extendhfsf2@PLT
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: popq %rax
+; SSE2-NEXT: .cfi_def_cfa_offset 8
; SSE2-NEXT: retq
;
; SSE41-LABEL: round_f16:
-; SSE41: ## %bb.0: ## %entry
+; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pushq %rax
; SSE41-NEXT: .cfi_def_cfa_offset 16
-; SSE41-NEXT: callq ___extendhfsf2
+; SSE41-NEXT: callq __extendhfsf2@PLT
; SSE41-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; SSE41-NEXT: andps %xmm0, %xmm1
; SSE41-NEXT: orps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: xorps %xmm0, %xmm0
; SSE41-NEXT: roundss $11, %xmm1, %xmm0
-; SSE41-NEXT: callq ___truncsfhf2
+; SSE41-NEXT: callq __truncsfhf2@PLT
; SSE41-NEXT: popq %rax
+; SSE41-NEXT: .cfi_def_cfa_offset 8
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_f16:
-; AVX1: ## %bb.0: ## %entry
+; AVX1: # %bb.0: # %entry
; AVX1-NEXT: pushq %rax
; AVX1-NEXT: .cfi_def_cfa_offset 16
-; AVX1-NEXT: callq ___extendhfsf2
+; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
; AVX1-NEXT: vorps %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: callq ___truncsfhf2
+; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: popq %rax
+; AVX1-NEXT: .cfi_def_cfa_offset 8
; AVX1-NEXT: retq
;
; AVX512F-LABEL: round_f16:
-; AVX512F: ## %bb.0: ## %entry
+; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpextrw $0, %xmm0, %eax
; AVX512F-NEXT: movzwl %ax, %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
@@ -75,11 +78,11 @@ entry:
define float @round_f32(float %x) {
; SSE2-LABEL: round_f32:
-; SSE2: ## %bb.0:
-; SSE2-NEXT: jmp _roundf ## TAILCALL
+; SSE2: # %bb.0:
+; SSE2-NEXT: jmp roundf@PLT # TAILCALL
;
; SSE41-LABEL: round_f32:
-; SSE41: ## %bb.0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; SSE41-NEXT: andps %xmm0, %xmm1
; SSE41-NEXT: orps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -89,7 +92,7 @@ define float @round_f32(float %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_f32:
-; AVX1: ## %bb.0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
; AVX1-NEXT: vorps %xmm2, %xmm1, %xmm1
@@ -97,24 +100,32 @@ define float @round_f32(float %x) {
; AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; AVX1-NEXT: retq
;
-; AVX512-LABEL: round_f32:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
-; AVX512-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
-; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: round_f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX512F-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
+; AVX512F-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512FP16-LABEL: round_f32:
+; AVX512FP16: ## %bb.0:
+; AVX512FP16-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX512FP16-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
+; AVX512FP16-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX512FP16-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; AVX512FP16-NEXT: retq
%a = call float @llvm.round.f32(float %x)
ret float %a
}
define double @round_f64(double %x) {
; SSE2-LABEL: round_f64:
-; SSE2: ## %bb.0:
-; SSE2-NEXT: jmp _round ## TAILCALL
+; SSE2: # %bb.0:
+; SSE2-NEXT: jmp round@PLT # TAILCALL
;
; SSE41-LABEL: round_f64:
-; SSE41: ## %bb.0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0]
; SSE41-NEXT: andpd %xmm0, %xmm1
; SSE41-NEXT: orpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -124,57 +135,66 @@ define double @round_f64(double %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_f64:
-; AVX1: ## %bb.0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [4.9999999999999994E-1,4.9999999999999994E-1]
-; AVX1-NEXT: ## xmm2 = mem[0,0]
+; AVX1-NEXT: # xmm2 = mem[0,0]
; AVX1-NEXT: vorpd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; AVX1-NEXT: retq
;
-; AVX512-LABEL: round_f64:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.9999999999999994E-1,4.9999999999999994E-1]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm1
-; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: round_f64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX512F-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm1
+; AVX512F-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512FP16-LABEL: round_f64:
+; AVX512FP16: ## %bb.0:
+; AVX512FP16-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX512FP16-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm1
+; AVX512FP16-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX512FP16-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; AVX512FP16-NEXT: retq
%a = call double @llvm.round.f64(double %x)
ret double %a
}
define <4 x float> @round_v4f32(<4 x float> %x) {
; SSE2-LABEL: round_v4f32:
-; SSE2: ## %bb.0:
+; SSE2: # %bb.0:
; SSE2-NEXT: subq $56, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 64
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: unpcklpd (%rsp), %xmm1 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm1 = xmm1[0],mem[0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: addq $56, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
; SSE2-NEXT: retq
;
; SSE41-LABEL: round_v4f32:
-; SSE41: ## %bb.0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; SSE41-NEXT: andps %xmm0, %xmm1
; SSE41-NEXT: orps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -183,43 +203,52 @@ define <4 x float> @round_v4f32(<4 x float> %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_v4f32:
-; AVX1: ## %bb.0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vroundps $11, %xmm0, %xmm0
; AVX1-NEXT: retq
;
-; AVX512-LABEL: round_v4f32:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
-; AVX512-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
-; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vroundps $11, %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: round_v4f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX512F-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
+; AVX512F-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512FP16-LABEL: round_v4f32:
+; AVX512FP16: ## %bb.0:
+; AVX512FP16-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX512FP16-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
+; AVX512FP16-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX512FP16-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX512FP16-NEXT: retq
%a = call <4 x float> @llvm.round.v4f32(<4 x float> %x)
ret <4 x float> %a
}
define <2 x double> @round_v2f64(<2 x double> %x) {
; SSE2-LABEL: round_v2f64:
-; SSE2: ## %bb.0:
+; SSE2: # %bb.0:
; SSE2-NEXT: subq $40, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 48
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: addq $40, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
; SSE2-NEXT: retq
;
; SSE41-LABEL: round_v2f64:
-; SSE41: ## %bb.0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0]
; SSE41-NEXT: andpd %xmm0, %xmm1
; SSE41-NEXT: orpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -228,77 +257,86 @@ define <2 x double> @round_v2f64(<2 x double> %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_v2f64:
-; AVX1: ## %bb.0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
; AVX1-NEXT: retq
;
-; AVX512-LABEL: round_v2f64:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.9999999999999994E-1,4.9999999999999994E-1]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm1
-; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vroundpd $11, %xmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: round_v2f64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX512F-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm1
+; AVX512F-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512FP16-LABEL: round_v2f64:
+; AVX512FP16: ## %bb.0:
+; AVX512FP16-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX512FP16-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm1
+; AVX512FP16-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; AVX512FP16-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX512FP16-NEXT: retq
%a = call <2 x double> @llvm.round.v2f64(<2 x double> %x)
ret <2 x double> %a
}
define <8 x float> @round_v8f32(<8 x float> %x) {
; SSE2-LABEL: round_v8f32:
-; SSE2: ## %bb.0:
+; SSE2: # %bb.0:
; SSE2-NEXT: subq $72, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 80
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: unpcklpd (%rsp), %xmm1 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: addq $72, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
; SSE2-NEXT: retq
;
; SSE41-LABEL: round_v8f32:
-; SSE41: ## %bb.0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; SSE41-NEXT: movaps %xmm0, %xmm3
; SSE41-NEXT: andps %xmm2, %xmm3
@@ -313,53 +351,62 @@ define <8 x float> @round_v8f32(<8 x float> %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_v8f32:
-; AVX1: ## %bb.0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
; AVX1-NEXT: vorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vroundps $11, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX512-LABEL: round_v8f32:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
-; AVX512-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1
-; AVX512-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vroundps $11, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: round_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX512F-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1
+; AVX512F-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512FP16-LABEL: round_v8f32:
+; AVX512FP16: ## %bb.0:
+; AVX512FP16-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX512FP16-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1
+; AVX512FP16-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX512FP16-NEXT: vroundps $11, %ymm0, %ymm0
+; AVX512FP16-NEXT: retq
%a = call <8 x float> @llvm.round.v8f32(<8 x float> %x)
ret <8 x float> %a
}
define <4 x double> @round_v4f64(<4 x double> %x) {
; SSE2-LABEL: round_v4f64:
-; SSE2: ## %bb.0:
+; SSE2: # %bb.0:
; SSE2-NEXT: subq $56, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 64
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: addq $56, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
; SSE2-NEXT: retq
;
; SSE41-LABEL: round_v4f64:
-; SSE41: ## %bb.0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movapd {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
; SSE41-NEXT: movapd %xmm0, %xmm3
; SSE41-NEXT: andpd %xmm2, %xmm3
@@ -374,123 +421,132 @@ define <4 x double> @round_v4f64(<4 x double> %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_v4f64:
-; AVX1: ## %bb.0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vandpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
; AVX1-NEXT: vorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX512-LABEL: round_v4f64:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm1
-; AVX512-NEXT: vaddpd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vroundpd $11, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: round_v4f64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX512F-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm1
+; AVX512F-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512FP16-LABEL: round_v4f64:
+; AVX512FP16: ## %bb.0:
+; AVX512FP16-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX512FP16-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm1
+; AVX512FP16-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX512FP16-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX512FP16-NEXT: retq
%a = call <4 x double> @llvm.round.v4f64(<4 x double> %x)
ret <4 x double> %a
}
define <16 x float> @round_v16f32(<16 x float> %x) {
; SSE2-LABEL: round_v16f32:
-; SSE2: ## %bb.0:
+; SSE2: # %bb.0:
; SSE2-NEXT: subq $104, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 112
-; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
-; SSE2-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: callq _roundf
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
+; SSE2-NEXT: callq roundf@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
-; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Folded Reload
-; SSE2-NEXT: ## xmm3 = xmm3[0],mem[0]
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE2-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
+; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE2-NEXT: # xmm3 = xmm3[0],mem[0]
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: movaps (%rsp), %xmm2 # 16-byte Reload
; SSE2-NEXT: addq $104, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
; SSE2-NEXT: retq
;
; SSE41-LABEL: round_v16f32:
-; SSE41: ## %bb.0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movaps {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; SSE41-NEXT: movaps %xmm0, %xmm5
; SSE41-NEXT: andps %xmm4, %xmm5
@@ -515,7 +571,7 @@ define <16 x float> @round_v16f32(<16 x float> %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_v16f32:
-; AVX1: ## %bb.0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm3
; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
@@ -528,68 +584,77 @@ define <16 x float> @round_v16f32(<16 x float> %x) {
; AVX1-NEXT: vroundps $11, %ymm1, %ymm1
; AVX1-NEXT: retq
;
-; AVX512-LABEL: round_v16f32:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
-; AVX512-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm1
-; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vrndscaleps $11, %zmm0, %zmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: round_v16f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX512F-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm1
+; AVX512F-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vrndscaleps $11, %zmm0, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512FP16-LABEL: round_v16f32:
+; AVX512FP16: ## %bb.0:
+; AVX512FP16-NEXT: vpbroadcastd {{.*#+}} zmm1 = [4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1,4.9999997E-1]
+; AVX512FP16-NEXT: vpternlogd $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm1
+; AVX512FP16-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512FP16-NEXT: vrndscaleps $11, %zmm0, %zmm0
+; AVX512FP16-NEXT: retq
%a = call <16 x float> @llvm.round.v16f32(<16 x float> %x)
ret <16 x float> %a
}
define <8 x double> @round_v8f64(<8 x double> %x) {
; SSE2-LABEL: round_v8f64:
-; SSE2: ## %bb.0:
+; SSE2: # %bb.0:
; SSE2-NEXT: subq $88, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 96
-; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; SSE2-NEXT: callq _round
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
+; SSE2-NEXT: callq round@PLT
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
-; SSE2-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: movaps (%rsp), %xmm2 # 16-byte Reload
; SSE2-NEXT: addq $88, %rsp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
; SSE2-NEXT: retq
;
; SSE41-LABEL: round_v8f64:
-; SSE41: ## %bb.0:
+; SSE41: # %bb.0:
; SSE41-NEXT: movapd {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0]
; SSE41-NEXT: movapd %xmm0, %xmm5
; SSE41-NEXT: andpd %xmm4, %xmm5
@@ -614,7 +679,7 @@ define <8 x double> @round_v8f64(<8 x double> %x) {
; SSE41-NEXT: retq
;
; AVX1-LABEL: round_v8f64:
-; AVX1: ## %bb.0:
+; AVX1: # %bb.0:
; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; AVX1-NEXT: vandpd %ymm2, %ymm0, %ymm3
; AVX1-NEXT: vmovapd {{.*#+}} ymm4 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
@@ -627,13 +692,21 @@ define <8 x double> @round_v8f64(<8 x double> %x) {
; AVX1-NEXT: vroundpd $11, %ymm1, %ymm1
; AVX1-NEXT: retq
;
-; AVX512-LABEL: round_v8f64:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
-; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vrndscalepd $11, %zmm0, %zmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: round_v8f64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX512F-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
+; AVX512F-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vrndscalepd $11, %zmm0, %zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512FP16-LABEL: round_v8f64:
+; AVX512FP16: ## %bb.0:
+; AVX512FP16-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1,4.9999999999999994E-1]
+; AVX512FP16-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm1
+; AVX512FP16-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; AVX512FP16-NEXT: vrndscalepd $11, %zmm0, %zmm0
+; AVX512FP16-NEXT: retq
%a = call <8 x double> @llvm.round.v8f64(<8 x double> %x)
ret <8 x double> %a
}
diff --git a/llvm/test/CodeGen/X86/fp-roundeven.ll b/llvm/test/CodeGen/X86/fp-roundeven.ll
index 6e087a383b1d1..fed2060dabd3a 100644
--- a/llvm/test/CodeGen/X86/fp-roundeven.ll
+++ b/llvm/test/CodeGen/X86/fp-roundeven.ll
@@ -10,9 +10,13 @@ define half @roundeven_f16(half %h) {
; SSE2: ## %bb.0: ## %entry
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: .cfi_def_cfa_offset 16
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: movzwl %ax, %edi
; SSE2-NEXT: callq ___extendhfsf2
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: callq ___truncsfhf2
+; SSE2-NEXT: ## kill: def $ax killed $ax def $eax
+; SSE2-NEXT: pinsrw $0, %eax, %xmm0
; SSE2-NEXT: popq %rax
; SSE2-NEXT: retq
;
@@ -20,9 +24,13 @@ define half @roundeven_f16(half %h) {
; SSE41: ## %bb.0: ## %entry
; SSE41-NEXT: pushq %rax
; SSE41-NEXT: .cfi_def_cfa_offset 16
+; SSE41-NEXT: pextrw $0, %xmm0, %eax
+; SSE41-NEXT: movzwl %ax, %edi
; SSE41-NEXT: callq ___extendhfsf2
; SSE41-NEXT: roundss $8, %xmm0, %xmm0
; SSE41-NEXT: callq ___truncsfhf2
+; SSE41-NEXT: ## kill: def $ax killed $ax def $eax
+; SSE41-NEXT: pinsrw $0, %eax, %xmm0
; SSE41-NEXT: popq %rax
; SSE41-NEXT: retq
;
@@ -30,9 +38,13 @@ define half @roundeven_f16(half %h) {
; AVX1: ## %bb.0: ## %entry
; AVX1-NEXT: pushq %rax
; AVX1-NEXT: .cfi_def_cfa_offset 16
+; AVX1-NEXT: vpextrw $0, %xmm0, %eax
+; AVX1-NEXT: movzwl %ax, %edi
; AVX1-NEXT: callq ___extendhfsf2
; AVX1-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
; AVX1-NEXT: callq ___truncsfhf2
+; AVX1-NEXT: ## kill: def $ax killed $ax def $eax
+; AVX1-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX1-NEXT: popq %rax
; AVX1-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/half-constrained.ll b/llvm/test/CodeGen/X86/half-constrained.ll
index 23e201936ddec..b4db61a4a4a34 100644
--- a/llvm/test/CodeGen/X86/half-constrained.ll
+++ b/llvm/test/CodeGen/X86/half-constrained.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin | FileCheck %s --check-prefix=X32-NOF16C
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=f16c | FileCheck %s --check-prefix=X32-F16C
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s --check-prefix=X64-NOF16C
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=f16c | FileCheck %s --check-prefix=X64-F16C
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=X32-NOF16C
+; RUN: llc < %s -mtriple=i686-linux-gnu -mattr=f16c | FileCheck %s --check-prefix=X32-F16C
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64-NOF16C
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=f16c | FileCheck %s --check-prefix=X64-F16C
@a = global half 0xH0000, align 2
@b = global half 0xH0000, align 2
@@ -10,40 +10,45 @@
define float @half_to_float() strictfp {
; X32-NOF16C-LABEL: half_to_float:
-; X32-NOF16C: ## %bb.0:
+; X32-NOF16C: # %bb.0:
; X32-NOF16C-NEXT: subl $12, %esp
; X32-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X32-NOF16C-NEXT: movzwl _a, %eax
+; X32-NOF16C-NEXT: movzwl a, %eax
; X32-NOF16C-NEXT: movl %eax, (%esp)
-; X32-NOF16C-NEXT: calll ___extendhfsf2
+; X32-NOF16C-NEXT: calll __gnu_h2f_ieee
; X32-NOF16C-NEXT: addl $12, %esp
+; X32-NOF16C-NEXT: .cfi_def_cfa_offset 4
; X32-NOF16C-NEXT: retl
;
; X32-F16C-LABEL: half_to_float:
-; X32-F16C: ## %bb.0:
+; X32-F16C: # %bb.0:
; X32-F16C-NEXT: pushl %eax
; X32-F16C-NEXT: .cfi_def_cfa_offset 8
-; X32-F16C-NEXT: movzwl _a, %eax
+; X32-F16C-NEXT: movzwl a, %eax
; X32-F16C-NEXT: vmovd %eax, %xmm0
; X32-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; X32-F16C-NEXT: vmovss %xmm0, (%esp)
; X32-F16C-NEXT: flds (%esp)
; X32-F16C-NEXT: wait
; X32-F16C-NEXT: popl %eax
+; X32-F16C-NEXT: .cfi_def_cfa_offset 4
; X32-F16C-NEXT: retl
;
; X64-NOF16C-LABEL: half_to_float:
-; X64-NOF16C: ## %bb.0:
+; X64-NOF16C: # %bb.0:
; X64-NOF16C-NEXT: pushq %rax
; X64-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X64-NOF16C-NEXT: pinsrw $0, _a(%rip), %xmm0
-; X64-NOF16C-NEXT: callq ___extendhfsf2
+; X64-NOF16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-NOF16C-NEXT: pinsrw $0, (%rax), %xmm0
+; X64-NOF16C-NEXT: callq __extendhfsf2@PLT
; X64-NOF16C-NEXT: popq %rax
+; X64-NOF16C-NEXT: .cfi_def_cfa_offset 8
; X64-NOF16C-NEXT: retq
;
; X64-F16C-LABEL: half_to_float:
-; X64-F16C: ## %bb.0:
-; X64-F16C-NEXT: movzwl _a(%rip), %eax
+; X64-F16C: # %bb.0:
+; X64-F16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-F16C-NEXT: movzwl (%rax), %eax
; X64-F16C-NEXT: vmovd %eax, %xmm0
; X64-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; X64-F16C-NEXT: retq
@@ -54,20 +59,21 @@ define float @half_to_float() strictfp {
define double @half_to_double() strictfp {
; X32-NOF16C-LABEL: half_to_double:
-; X32-NOF16C: ## %bb.0:
+; X32-NOF16C: # %bb.0:
; X32-NOF16C-NEXT: subl $12, %esp
; X32-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X32-NOF16C-NEXT: movzwl _a, %eax
+; X32-NOF16C-NEXT: movzwl a, %eax
; X32-NOF16C-NEXT: movl %eax, (%esp)
-; X32-NOF16C-NEXT: calll ___extendhfsf2
+; X32-NOF16C-NEXT: calll __gnu_h2f_ieee
; X32-NOF16C-NEXT: addl $12, %esp
+; X32-NOF16C-NEXT: .cfi_def_cfa_offset 4
; X32-NOF16C-NEXT: retl
;
; X32-F16C-LABEL: half_to_double:
-; X32-F16C: ## %bb.0:
+; X32-F16C: # %bb.0:
; X32-F16C-NEXT: subl $12, %esp
; X32-F16C-NEXT: .cfi_def_cfa_offset 16
-; X32-F16C-NEXT: movzwl _a, %eax
+; X32-F16C-NEXT: movzwl a, %eax
; X32-F16C-NEXT: vmovd %eax, %xmm0
; X32-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; X32-F16C-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
@@ -75,21 +81,25 @@ define double @half_to_double() strictfp {
; X32-F16C-NEXT: fldl (%esp)
; X32-F16C-NEXT: wait
; X32-F16C-NEXT: addl $12, %esp
+; X32-F16C-NEXT: .cfi_def_cfa_offset 4
; X32-F16C-NEXT: retl
;
; X64-NOF16C-LABEL: half_to_double:
-; X64-NOF16C: ## %bb.0:
+; X64-NOF16C: # %bb.0:
; X64-NOF16C-NEXT: pushq %rax
; X64-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X64-NOF16C-NEXT: pinsrw $0, _a(%rip), %xmm0
-; X64-NOF16C-NEXT: callq ___extendhfsf2
+; X64-NOF16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-NOF16C-NEXT: pinsrw $0, (%rax), %xmm0
+; X64-NOF16C-NEXT: callq __extendhfsf2@PLT
; X64-NOF16C-NEXT: cvtss2sd %xmm0, %xmm0
; X64-NOF16C-NEXT: popq %rax
+; X64-NOF16C-NEXT: .cfi_def_cfa_offset 8
; X64-NOF16C-NEXT: retq
;
; X64-F16C-LABEL: half_to_double:
-; X64-F16C: ## %bb.0:
-; X64-F16C-NEXT: movzwl _a(%rip), %eax
+; X64-F16C: # %bb.0:
+; X64-F16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-F16C-NEXT: movzwl (%rax), %eax
; X64-F16C-NEXT: vmovd %eax, %xmm0
; X64-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; X64-F16C-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
@@ -101,41 +111,47 @@ define double @half_to_double() strictfp {
define x86_fp80 @half_to_fp80() strictfp {
; X32-NOF16C-LABEL: half_to_fp80:
-; X32-NOF16C: ## %bb.0:
+; X32-NOF16C: # %bb.0:
; X32-NOF16C-NEXT: subl $12, %esp
; X32-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X32-NOF16C-NEXT: movzwl _a, %eax
+; X32-NOF16C-NEXT: movzwl a, %eax
; X32-NOF16C-NEXT: movl %eax, (%esp)
-; X32-NOF16C-NEXT: calll ___extendhfsf2
+; X32-NOF16C-NEXT: calll __gnu_h2f_ieee
; X32-NOF16C-NEXT: addl $12, %esp
+; X32-NOF16C-NEXT: .cfi_def_cfa_offset 4
; X32-NOF16C-NEXT: retl
;
; X32-F16C-LABEL: half_to_fp80:
-; X32-F16C: ## %bb.0:
+; X32-F16C: # %bb.0:
; X32-F16C-NEXT: subl $12, %esp
; X32-F16C-NEXT: .cfi_def_cfa_offset 16
-; X32-F16C-NEXT: vpinsrw $0, _a, %xmm0, %xmm0
+; X32-F16C-NEXT: vpinsrw $0, a, %xmm0, %xmm0
; X32-F16C-NEXT: vpextrw $0, %xmm0, (%esp)
-; X32-F16C-NEXT: calll ___extendhfxf2
+; X32-F16C-NEXT: calll __extendhfxf2
; X32-F16C-NEXT: addl $12, %esp
+; X32-F16C-NEXT: .cfi_def_cfa_offset 4
; X32-F16C-NEXT: retl
;
; X64-NOF16C-LABEL: half_to_fp80:
-; X64-NOF16C: ## %bb.0:
+; X64-NOF16C: # %bb.0:
; X64-NOF16C-NEXT: pushq %rax
; X64-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X64-NOF16C-NEXT: pinsrw $0, _a(%rip), %xmm0
-; X64-NOF16C-NEXT: callq ___extendhfxf2
+; X64-NOF16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-NOF16C-NEXT: pinsrw $0, (%rax), %xmm0
+; X64-NOF16C-NEXT: callq __extendhfxf2@PLT
; X64-NOF16C-NEXT: popq %rax
+; X64-NOF16C-NEXT: .cfi_def_cfa_offset 8
; X64-NOF16C-NEXT: retq
;
; X64-F16C-LABEL: half_to_fp80:
-; X64-F16C: ## %bb.0:
+; X64-F16C: # %bb.0:
; X64-F16C-NEXT: pushq %rax
; X64-F16C-NEXT: .cfi_def_cfa_offset 16
-; X64-F16C-NEXT: vpinsrw $0, _a(%rip), %xmm0, %xmm0
-; X64-F16C-NEXT: callq ___extendhfxf2
+; X64-F16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-F16C-NEXT: vpinsrw $0, (%rax), %xmm0, %xmm0
+; X64-F16C-NEXT: callq __extendhfxf2@PLT
; X64-F16C-NEXT: popq %rax
+; X64-F16C-NEXT: .cfi_def_cfa_offset 8
; X64-F16C-NEXT: retq
%1 = load half, ptr @a, align 2
%2 = tail call x86_fp80 @llvm.experimental.constrained.fpext.f80.f16(half %1, metadata !"fpexcept.strict") #0
@@ -144,42 +160,46 @@ define x86_fp80 @half_to_fp80() strictfp {
define void @float_to_half(float %0) strictfp {
; X32-NOF16C-LABEL: float_to_half:
-; X32-NOF16C: ## %bb.0:
+; X32-NOF16C: # %bb.0:
; X32-NOF16C-NEXT: subl $12, %esp
; X32-NOF16C-NEXT: .cfi_def_cfa_offset 16
; X32-NOF16C-NEXT: flds {{[0-9]+}}(%esp)
; X32-NOF16C-NEXT: fstps (%esp)
; X32-NOF16C-NEXT: wait
-; X32-NOF16C-NEXT: calll ___truncsfhf2
-; X32-NOF16C-NEXT: movw %ax, _a
+; X32-NOF16C-NEXT: calll __gnu_f2h_ieee
+; X32-NOF16C-NEXT: movw %ax, a
; X32-NOF16C-NEXT: addl $12, %esp
+; X32-NOF16C-NEXT: .cfi_def_cfa_offset 4
; X32-NOF16C-NEXT: retl
;
; X32-F16C-LABEL: float_to_half:
-; X32-F16C: ## %bb.0:
+; X32-F16C: # %bb.0:
; X32-F16C-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; X32-F16C-NEXT: vmovd %xmm0, %eax
-; X32-F16C-NEXT: movw %ax, _a
+; X32-F16C-NEXT: movw %ax, a
; X32-F16C-NEXT: retl
;
; X64-NOF16C-LABEL: float_to_half:
-; X64-NOF16C: ## %bb.0:
+; X64-NOF16C: # %bb.0:
; X64-NOF16C-NEXT: pushq %rax
; X64-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X64-NOF16C-NEXT: callq ___truncsfhf2
+; X64-NOF16C-NEXT: callq __truncsfhf2@PLT
; X64-NOF16C-NEXT: pextrw $0, %xmm0, %eax
-; X64-NOF16C-NEXT: movw %ax, _a(%rip)
+; X64-NOF16C-NEXT: movq a@GOTPCREL(%rip), %rcx
+; X64-NOF16C-NEXT: movw %ax, (%rcx)
; X64-NOF16C-NEXT: popq %rax
+; X64-NOF16C-NEXT: .cfi_def_cfa_offset 8
; X64-NOF16C-NEXT: retq
;
; X64-F16C-LABEL: float_to_half:
-; X64-F16C: ## %bb.0:
+; X64-F16C: # %bb.0:
; X64-F16C-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-F16C-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; X64-F16C-NEXT: vmovd %xmm0, %eax
-; X64-F16C-NEXT: movw %ax, _a(%rip)
+; X64-F16C-NEXT: movq a@GOTPCREL(%rip), %rcx
+; X64-F16C-NEXT: movw %ax, (%rcx)
; X64-F16C-NEXT: retq
%2 = tail call half @llvm.experimental.constrained.fptrunc.f16.f32(float %0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
store half %2, ptr @a, align 2
@@ -188,45 +208,51 @@ define void @float_to_half(float %0) strictfp {
define void @double_to_half(double %0) strictfp {
; X32-NOF16C-LABEL: double_to_half:
-; X32-NOF16C: ## %bb.0:
+; X32-NOF16C: # %bb.0:
; X32-NOF16C-NEXT: subl $12, %esp
; X32-NOF16C-NEXT: .cfi_def_cfa_offset 16
; X32-NOF16C-NEXT: fldl {{[0-9]+}}(%esp)
; X32-NOF16C-NEXT: fstpl (%esp)
; X32-NOF16C-NEXT: wait
-; X32-NOF16C-NEXT: calll ___truncdfhf2
-; X32-NOF16C-NEXT: movw %ax, _a
+; X32-NOF16C-NEXT: calll __truncdfhf2
+; X32-NOF16C-NEXT: movw %ax, a
; X32-NOF16C-NEXT: addl $12, %esp
+; X32-NOF16C-NEXT: .cfi_def_cfa_offset 4
; X32-NOF16C-NEXT: retl
;
; X32-F16C-LABEL: double_to_half:
-; X32-F16C: ## %bb.0:
+; X32-F16C: # %bb.0:
; X32-F16C-NEXT: subl $12, %esp
; X32-F16C-NEXT: .cfi_def_cfa_offset 16
; X32-F16C-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; X32-F16C-NEXT: vmovq %xmm0, (%esp)
-; X32-F16C-NEXT: calll ___truncdfhf2
-; X32-F16C-NEXT: vpextrw $0, %xmm0, _a
+; X32-F16C-NEXT: calll __truncdfhf2
+; X32-F16C-NEXT: vpextrw $0, %xmm0, a
; X32-F16C-NEXT: addl $12, %esp
+; X32-F16C-NEXT: .cfi_def_cfa_offset 4
; X32-F16C-NEXT: retl
;
; X64-NOF16C-LABEL: double_to_half:
-; X64-NOF16C: ## %bb.0:
+; X64-NOF16C: # %bb.0:
; X64-NOF16C-NEXT: pushq %rax
; X64-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X64-NOF16C-NEXT: callq ___truncdfhf2
+; X64-NOF16C-NEXT: callq __truncdfhf2@PLT
; X64-NOF16C-NEXT: pextrw $0, %xmm0, %eax
-; X64-NOF16C-NEXT: movw %ax, _a(%rip)
+; X64-NOF16C-NEXT: movq a@GOTPCREL(%rip), %rcx
+; X64-NOF16C-NEXT: movw %ax, (%rcx)
; X64-NOF16C-NEXT: popq %rax
+; X64-NOF16C-NEXT: .cfi_def_cfa_offset 8
; X64-NOF16C-NEXT: retq
;
; X64-F16C-LABEL: double_to_half:
-; X64-F16C: ## %bb.0:
+; X64-F16C: # %bb.0:
; X64-F16C-NEXT: pushq %rax
; X64-F16C-NEXT: .cfi_def_cfa_offset 16
-; X64-F16C-NEXT: callq ___truncdfhf2
-; X64-F16C-NEXT: vpextrw $0, %xmm0, _a(%rip)
+; X64-F16C-NEXT: callq __truncdfhf2@PLT
+; X64-F16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-F16C-NEXT: vpextrw $0, %xmm0, (%rax)
; X64-F16C-NEXT: popq %rax
+; X64-F16C-NEXT: .cfi_def_cfa_offset 8
; X64-F16C-NEXT: retq
%2 = tail call half @llvm.experimental.constrained.fptrunc.f16.f64(double %0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
store half %2, ptr @a, align 2
@@ -235,52 +261,58 @@ define void @double_to_half(double %0) strictfp {
define void @fp80_to_half(x86_fp80 %0) strictfp {
; X32-NOF16C-LABEL: fp80_to_half:
-; X32-NOF16C: ## %bb.0:
-; X32-NOF16C-NEXT: subl $28, %esp
-; X32-NOF16C-NEXT: .cfi_def_cfa_offset 32
+; X32-NOF16C: # %bb.0:
+; X32-NOF16C-NEXT: subl $12, %esp
+; X32-NOF16C-NEXT: .cfi_def_cfa_offset 16
; X32-NOF16C-NEXT: fldt {{[0-9]+}}(%esp)
; X32-NOF16C-NEXT: fstpt (%esp)
; X32-NOF16C-NEXT: wait
-; X32-NOF16C-NEXT: calll ___truncxfhf2
-; X32-NOF16C-NEXT: movw %ax, _a
-; X32-NOF16C-NEXT: addl $28, %esp
+; X32-NOF16C-NEXT: calll __truncxfhf2
+; X32-NOF16C-NEXT: movw %ax, a
+; X32-NOF16C-NEXT: addl $12, %esp
+; X32-NOF16C-NEXT: .cfi_def_cfa_offset 4
; X32-NOF16C-NEXT: retl
;
; X32-F16C-LABEL: fp80_to_half:
-; X32-F16C: ## %bb.0:
-; X32-F16C-NEXT: subl $28, %esp
-; X32-F16C-NEXT: .cfi_def_cfa_offset 32
+; X32-F16C: # %bb.0:
+; X32-F16C-NEXT: subl $12, %esp
+; X32-F16C-NEXT: .cfi_def_cfa_offset 16
; X32-F16C-NEXT: fldt {{[0-9]+}}(%esp)
; X32-F16C-NEXT: fstpt (%esp)
; X32-F16C-NEXT: wait
-; X32-F16C-NEXT: calll ___truncxfhf2
-; X32-F16C-NEXT: vpextrw $0, %xmm0, _a
-; X32-F16C-NEXT: addl $28, %esp
+; X32-F16C-NEXT: calll __truncxfhf2
+; X32-F16C-NEXT: vpextrw $0, %xmm0, a
+; X32-F16C-NEXT: addl $12, %esp
+; X32-F16C-NEXT: .cfi_def_cfa_offset 4
; X32-F16C-NEXT: retl
;
; X64-NOF16C-LABEL: fp80_to_half:
-; X64-NOF16C: ## %bb.0:
+; X64-NOF16C: # %bb.0:
; X64-NOF16C-NEXT: subq $24, %rsp
; X64-NOF16C-NEXT: .cfi_def_cfa_offset 32
; X64-NOF16C-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NOF16C-NEXT: fstpt (%rsp)
; X64-NOF16C-NEXT: wait
-; X64-NOF16C-NEXT: callq ___truncxfhf2
+; X64-NOF16C-NEXT: callq __truncxfhf2@PLT
; X64-NOF16C-NEXT: pextrw $0, %xmm0, %eax
-; X64-NOF16C-NEXT: movw %ax, _a(%rip)
+; X64-NOF16C-NEXT: movq a@GOTPCREL(%rip), %rcx
+; X64-NOF16C-NEXT: movw %ax, (%rcx)
; X64-NOF16C-NEXT: addq $24, %rsp
+; X64-NOF16C-NEXT: .cfi_def_cfa_offset 8
; X64-NOF16C-NEXT: retq
;
; X64-F16C-LABEL: fp80_to_half:
-; X64-F16C: ## %bb.0:
+; X64-F16C: # %bb.0:
; X64-F16C-NEXT: subq $24, %rsp
; X64-F16C-NEXT: .cfi_def_cfa_offset 32
; X64-F16C-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-F16C-NEXT: fstpt (%rsp)
; X64-F16C-NEXT: wait
-; X64-F16C-NEXT: callq ___truncxfhf2
-; X64-F16C-NEXT: vpextrw $0, %xmm0, _a(%rip)
+; X64-F16C-NEXT: callq __truncxfhf2@PLT
+; X64-F16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-F16C-NEXT: vpextrw $0, %xmm0, (%rax)
; X64-F16C-NEXT: addq $24, %rsp
+; X64-F16C-NEXT: .cfi_def_cfa_offset 8
; X64-F16C-NEXT: retq
%2 = tail call half @llvm.experimental.constrained.fptrunc.f16.f80(x86_fp80 %0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
store half %2, ptr @a, align 2
@@ -289,32 +321,33 @@ define void @fp80_to_half(x86_fp80 %0) strictfp {
define void @add() strictfp {
; X32-NOF16C-LABEL: add:
-; X32-NOF16C: ## %bb.0:
+; X32-NOF16C: # %bb.0:
; X32-NOF16C-NEXT: subl $12, %esp
; X32-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X32-NOF16C-NEXT: movzwl _a, %eax
+; X32-NOF16C-NEXT: movzwl a, %eax
; X32-NOF16C-NEXT: movl %eax, (%esp)
-; X32-NOF16C-NEXT: calll ___extendhfsf2
-; X32-NOF16C-NEXT: fstps {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
+; X32-NOF16C-NEXT: calll __gnu_h2f_ieee
+; X32-NOF16C-NEXT: fstps {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NOF16C-NEXT: wait
-; X32-NOF16C-NEXT: movzwl _b, %eax
+; X32-NOF16C-NEXT: movzwl b, %eax
; X32-NOF16C-NEXT: movl %eax, (%esp)
-; X32-NOF16C-NEXT: calll ___extendhfsf2
-; X32-NOF16C-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Reload
+; X32-NOF16C-NEXT: calll __gnu_h2f_ieee
+; X32-NOF16C-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X32-NOF16C-NEXT: faddp %st, %st(1)
; X32-NOF16C-NEXT: fstps (%esp)
; X32-NOF16C-NEXT: wait
-; X32-NOF16C-NEXT: calll ___truncsfhf2
-; X32-NOF16C-NEXT: movw %ax, _c
+; X32-NOF16C-NEXT: calll __gnu_f2h_ieee
+; X32-NOF16C-NEXT: movw %ax, c
; X32-NOF16C-NEXT: addl $12, %esp
+; X32-NOF16C-NEXT: .cfi_def_cfa_offset 4
; X32-NOF16C-NEXT: retl
;
; X32-F16C-LABEL: add:
-; X32-F16C: ## %bb.0:
-; X32-F16C-NEXT: movzwl _a, %eax
+; X32-F16C: # %bb.0:
+; X32-F16C-NEXT: movzwl a, %eax
; X32-F16C-NEXT: vmovd %eax, %xmm0
; X32-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; X32-F16C-NEXT: movzwl _b, %eax
+; X32-F16C-NEXT: movzwl b, %eax
; X32-F16C-NEXT: vmovd %eax, %xmm1
; X32-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; X32-F16C-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -322,31 +355,37 @@ define void @add() strictfp {
; X32-F16C-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; X32-F16C-NEXT: vmovd %xmm0, %eax
-; X32-F16C-NEXT: movw %ax, _c
+; X32-F16C-NEXT: movw %ax, c
; X32-F16C-NEXT: retl
;
; X64-NOF16C-LABEL: add:
-; X64-NOF16C: ## %bb.0:
+; X64-NOF16C: # %bb.0:
; X64-NOF16C-NEXT: pushq %rax
; X64-NOF16C-NEXT: .cfi_def_cfa_offset 16
-; X64-NOF16C-NEXT: pinsrw $0, _a(%rip), %xmm0
-; X64-NOF16C-NEXT: callq ___extendhfsf2
-; X64-NOF16C-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Folded Spill
-; X64-NOF16C-NEXT: pinsrw $0, _b(%rip), %xmm0
-; X64-NOF16C-NEXT: callq ___extendhfsf2
-; X64-NOF16C-NEXT: addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 4-byte Folded Reload
-; X64-NOF16C-NEXT: callq ___truncsfhf2
+; X64-NOF16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-NOF16C-NEXT: pinsrw $0, (%rax), %xmm0
+; X64-NOF16C-NEXT: callq __extendhfsf2@PLT
+; X64-NOF16C-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; X64-NOF16C-NEXT: movq b@GOTPCREL(%rip), %rax
+; X64-NOF16C-NEXT: pinsrw $0, (%rax), %xmm0
+; X64-NOF16C-NEXT: callq __extendhfsf2@PLT
+; X64-NOF16C-NEXT: addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; X64-NOF16C-NEXT: callq __truncsfhf2@PLT
; X64-NOF16C-NEXT: pextrw $0, %xmm0, %eax
-; X64-NOF16C-NEXT: movw %ax, _c(%rip)
+; X64-NOF16C-NEXT: movq c@GOTPCREL(%rip), %rcx
+; X64-NOF16C-NEXT: movw %ax, (%rcx)
; X64-NOF16C-NEXT: popq %rax
+; X64-NOF16C-NEXT: .cfi_def_cfa_offset 8
; X64-NOF16C-NEXT: retq
;
; X64-F16C-LABEL: add:
-; X64-F16C: ## %bb.0:
-; X64-F16C-NEXT: movzwl _a(%rip), %eax
+; X64-F16C: # %bb.0:
+; X64-F16C-NEXT: movq a@GOTPCREL(%rip), %rax
+; X64-F16C-NEXT: movzwl (%rax), %eax
; X64-F16C-NEXT: vmovd %eax, %xmm0
; X64-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
-; X64-F16C-NEXT: movzwl _b(%rip), %eax
+; X64-F16C-NEXT: movq b@GOTPCREL(%rip), %rax
+; X64-F16C-NEXT: movzwl (%rax), %eax
; X64-F16C-NEXT: vmovd %eax, %xmm1
; X64-F16C-NEXT: vcvtph2ps %xmm1, %xmm1
; X64-F16C-NEXT: vaddss %xmm1, %xmm0, %xmm0
@@ -354,7 +393,8 @@ define void @add() strictfp {
; X64-F16C-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; X64-F16C-NEXT: vmovd %xmm0, %eax
-; X64-F16C-NEXT: movw %ax, _c(%rip)
+; X64-F16C-NEXT: movq c@GOTPCREL(%rip), %rcx
+; X64-F16C-NEXT: movw %ax, (%rcx)
; X64-F16C-NEXT: retq
%1 = load half, ptr @a, align 2
%2 = tail call float @llvm.experimental.constrained.fpext.f32.f16(half %1, metadata !"fpexcept.strict") #0
diff --git a/llvm/test/CodeGen/X86/half-darwin.ll b/llvm/test/CodeGen/X86/half-darwin.ll
new file mode 100644
index 0000000000000..cdc188b3039c2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/half-darwin.ll
@@ -0,0 +1,190 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-apple-macosx %s -o - | FileCheck %s --check-prefix=CHECK-SOFT
+; RUN: llc -mtriple=x86_64-apple-macosx -mattr=+f16c %s -o - | FileCheck %s --check-prefix=CHECK-F16C
+; RUN: llc -mtriple=x86_64-apple-macosx -mattr=+avx512fp16 %s -o - | FileCheck %s --check-prefix=CHECK-FP16
+
+define void @truncsfhf(float %in, ptr %ptr) nounwind {
+; CHECK-SOFT-LABEL: truncsfhf:
+; CHECK-SOFT: ## %bb.0:
+; CHECK-SOFT-NEXT: pushq %rbx
+; CHECK-SOFT-NEXT: movq %rdi, %rbx
+; CHECK-SOFT-NEXT: callq ___truncsfhf2
+; CHECK-SOFT-NEXT: movw %ax, (%rbx)
+; CHECK-SOFT-NEXT: popq %rbx
+; CHECK-SOFT-NEXT: retq
+;
+; CHECK-F16C-LABEL: truncsfhf:
+; CHECK-F16C: ## %bb.0:
+; CHECK-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; CHECK-F16C-NEXT: vmovd %xmm0, %eax
+; CHECK-F16C-NEXT: movw %ax, (%rdi)
+; CHECK-F16C-NEXT: retq
+;
+; CHECK-FP16-LABEL: truncsfhf:
+; CHECK-FP16: ## %bb.0:
+; CHECK-FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
+; CHECK-FP16-NEXT: vmovsh %xmm0, (%rdi)
+; CHECK-FP16-NEXT: retq
+
+
+
+ %half = fptrunc float %in to half
+ store half %half, ptr %ptr
+ ret void
+}
+
+define void @truncdfhf(double %in, ptr %ptr) nounwind {
+; CHECK-SOFT-LABEL: truncdfhf:
+; CHECK-SOFT: ## %bb.0:
+; CHECK-SOFT-NEXT: pushq %rbx
+; CHECK-SOFT-NEXT: movq %rdi, %rbx
+; CHECK-SOFT-NEXT: callq ___truncdfhf2
+; CHECK-SOFT-NEXT: movw %ax, (%rbx)
+; CHECK-SOFT-NEXT: popq %rbx
+; CHECK-SOFT-NEXT: retq
+;
+; CHECK-F16C-LABEL: truncdfhf:
+; CHECK-F16C: ## %bb.0:
+; CHECK-F16C-NEXT: pushq %rbx
+; CHECK-F16C-NEXT: movq %rdi, %rbx
+; CHECK-F16C-NEXT: callq ___truncdfhf2
+; CHECK-F16C-NEXT: movw %ax, (%rbx)
+; CHECK-F16C-NEXT: popq %rbx
+; CHECK-F16C-NEXT: retq
+;
+; CHECK-FP16-LABEL: truncdfhf:
+; CHECK-FP16: ## %bb.0:
+; CHECK-FP16-NEXT: vcvtsd2sh %xmm0, %xmm0, %xmm0
+; CHECK-FP16-NEXT: vmovsh %xmm0, (%rdi)
+; CHECK-FP16-NEXT: retq
+
+
+
+ %half = fptrunc double %in to half
+ store half %half, ptr %ptr
+ ret void
+}
+
+define float @extendhfsf(ptr %ptr) nounwind {
+; CHECK-SOFT-LABEL: extendhfsf:
+; CHECK-SOFT: ## %bb.0:
+; CHECK-SOFT-NEXT: pushq %rax
+; CHECK-SOFT-NEXT: movzwl (%rdi), %edi
+; CHECK-SOFT-NEXT: callq ___extendhfsf2
+; CHECK-SOFT-NEXT: popq %rax
+; CHECK-SOFT-NEXT: retq
+;
+; CHECK-F16C-LABEL: extendhfsf:
+; CHECK-F16C: ## %bb.0:
+; CHECK-F16C-NEXT: movzwl (%rdi), %eax
+; CHECK-F16C-NEXT: vmovd %eax, %xmm0
+; CHECK-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-F16C-NEXT: retq
+;
+; CHECK-FP16-LABEL: extendhfsf:
+; CHECK-FP16: ## %bb.0:
+; CHECK-FP16-NEXT: vmovsh (%rdi), %xmm0
+; CHECK-FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
+; CHECK-FP16-NEXT: retq
+
+
+
+ %in = load half, ptr %ptr
+ %float = fpext half %in to float
+ ret float %float
+}
+
+define void @strict_truncsfhf(float %in, ptr %ptr) nounwind {
+; CHECK-SOFT-LABEL: strict_truncsfhf:
+; CHECK-SOFT: ## %bb.0:
+; CHECK-SOFT-NEXT: pushq %rbx
+; CHECK-SOFT-NEXT: movq %rdi, %rbx
+; CHECK-SOFT-NEXT: callq ___truncsfhf2
+; CHECK-SOFT-NEXT: movw %ax, (%rbx)
+; CHECK-SOFT-NEXT: popq %rbx
+; CHECK-SOFT-NEXT: retq
+;
+; CHECK-F16C-LABEL: strict_truncsfhf:
+; CHECK-F16C: ## %bb.0:
+; CHECK-F16C-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; CHECK-F16C-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; CHECK-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; CHECK-F16C-NEXT: vmovd %xmm0, %eax
+; CHECK-F16C-NEXT: movw %ax, (%rdi)
+; CHECK-F16C-NEXT: retq
+;
+; CHECK-FP16-LABEL: strict_truncsfhf:
+; CHECK-FP16: ## %bb.0:
+; CHECK-FP16-NEXT: vcvtss2sh %xmm0, %xmm0, %xmm0
+; CHECK-FP16-NEXT: vmovsh %xmm0, (%rdi)
+; CHECK-FP16-NEXT: retq
+
+
+
+ %half = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %in, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ store half %half, ptr %ptr
+ ret void
+}
+
+define void @strict_truncdfhf(double %in, ptr %ptr) nounwind {
+; CHECK-SOFT-LABEL: strict_truncdfhf:
+; CHECK-SOFT: ## %bb.0:
+; CHECK-SOFT-NEXT: pushq %rbx
+; CHECK-SOFT-NEXT: movq %rdi, %rbx
+; CHECK-SOFT-NEXT: callq ___truncdfhf2
+; CHECK-SOFT-NEXT: movw %ax, (%rbx)
+; CHECK-SOFT-NEXT: popq %rbx
+; CHECK-SOFT-NEXT: retq
+;
+; CHECK-F16C-LABEL: strict_truncdfhf:
+; CHECK-F16C: ## %bb.0:
+; CHECK-F16C-NEXT: pushq %rbx
+; CHECK-F16C-NEXT: movq %rdi, %rbx
+; CHECK-F16C-NEXT: callq ___truncdfhf2
+; CHECK-F16C-NEXT: movw %ax, (%rbx)
+; CHECK-F16C-NEXT: popq %rbx
+; CHECK-F16C-NEXT: retq
+;
+; CHECK-FP16-LABEL: strict_truncdfhf:
+; CHECK-FP16: ## %bb.0:
+; CHECK-FP16-NEXT: vcvtsd2sh %xmm0, %xmm0, %xmm0
+; CHECK-FP16-NEXT: vmovsh %xmm0, (%rdi)
+; CHECK-FP16-NEXT: retq
+
+
+ %half = call half @llvm.experimental.constrained.fptrunc.f16.f64(double %in, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ store half %half, ptr %ptr
+ ret void
+}
+
+define float @strict_extendhfsf(ptr %ptr) nounwind {
+; CHECK-SOFT-LABEL: strict_extendhfsf:
+; CHECK-SOFT: ## %bb.0:
+; CHECK-SOFT-NEXT: pushq %rax
+; CHECK-SOFT-NEXT: movzwl (%rdi), %edi
+; CHECK-SOFT-NEXT: callq ___extendhfsf2
+; CHECK-SOFT-NEXT: popq %rax
+; CHECK-SOFT-NEXT: retq
+;
+; CHECK-F16C-LABEL: strict_extendhfsf:
+; CHECK-F16C: ## %bb.0:
+; CHECK-F16C-NEXT: movzwl (%rdi), %eax
+; CHECK-F16C-NEXT: vmovd %eax, %xmm0
+; CHECK-F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-F16C-NEXT: retq
+;
+; CHECK-FP16-LABEL: strict_extendhfsf:
+; CHECK-FP16: ## %bb.0:
+; CHECK-FP16-NEXT: vmovsh (%rdi), %xmm0
+; CHECK-FP16-NEXT: vcvtsh2ss %xmm0, %xmm0, %xmm0
+; CHECK-FP16-NEXT: retq
+
+
+ %in = load half, ptr %ptr
+ %float = call float @llvm.experimental.constrained.fpext.f32.f16(half %in, metadata !"fpexcept.strict")
+ ret float %float
+}
+
+declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata)
+declare half @llvm.experimental.constrained.fptrunc.f16.f64(double, metadata, metadata)
+declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata)