[llvm] [X86][FP16] Widen 128/256-bit CVTTP2xI to 512-bit when VLX not enabled (PR #142763)

Phoebe Wang via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 4 03:19:41 PDT 2025


https://github.com/phoebewang updated https://github.com/llvm/llvm-project/pull/142763

>From ec89acb9d81c288727f052ee8e2e620e319409d4 Mon Sep 17 00:00:00 2001
From: "Wang, Phoebe" <phoebe.wang at intel.com>
Date: Wed, 4 Jun 2025 18:10:05 +0800
Subject: [PATCH] [X86][FP16] Widen 128/256-bit CVTTP2xI to 512-bit when VLX
 not enabled

---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  55 +++-
 .../X86/vec-strict-fptoint-128-fp16.ll        | 310 ++++++++++++++++++
 .../X86/vec-strict-fptoint-256-fp16.ll        | 104 ++++++
 3 files changed, 454 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b1a3e3c006bb3..fb76846297eb9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2371,6 +2371,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::LLRINT, MVT::v8f16, Legal);
     }
 
+    setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v8i16, Custom);
+    setOperationAction(ISD::FP_TO_UINT,         MVT::v8i16, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i16, Custom);
+
     if (Subtarget.hasVLX()) {
       setGroup(MVT::v8f16);
       setGroup(MVT::v16f16);
@@ -2386,10 +2391,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::UINT_TO_FP,         MVT::v8i16,  Legal);
       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v8i16,  Legal);
 
-      setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Custom);
-      setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v8i16, Custom);
-      setOperationAction(ISD::FP_TO_UINT,         MVT::v8i16, Custom);
-      setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i16, Custom);
       setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Legal);
       setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v8f16, Legal);
       setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Custom);
@@ -20010,10 +20011,12 @@ static SDValue promoteXINT_TO_FP(SDValue Op, const SDLoc &dl,
 
 static bool isLegalConversion(MVT VT, MVT FloatVT, bool IsSigned,
                               const X86Subtarget &Subtarget) {
-  if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
-    return true;
-  if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
-    return true;
+  if (FloatVT.getScalarType() != MVT::f16 || Subtarget.hasVLX()) {
+    if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
+      return true;
+    if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
+      return true;
+  }
   if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
     return true;
   if (Subtarget.useAVX512Regs()) {
@@ -21552,6 +21555,7 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
   bool IsStrict = Op->isStrictFPOpcode();
   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
+  bool HasVLX = Subtarget.hasVLX();
   MVT VT = Op->getSimpleValueType(0);
   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
   SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
@@ -21582,7 +21586,7 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
       else
         Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
 
-      if (!IsSigned && !Subtarget.hasVLX()) {
+      if (!IsSigned && !HasVLX) {
         assert(Subtarget.useAVX512Regs() && "Unexpected features!");
         // Widen to 512-bits.
         ResVT = MVT::v8i32;
@@ -21612,7 +21616,8 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
     }
 
     if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
-      if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
+      if ((HasVLX && (VT == MVT::v8i16 || VT == MVT::v16i16)) ||
+          VT == MVT::v32i16)
         return Op;
 
       MVT ResVT = VT;
@@ -21620,7 +21625,7 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
       if (EleVT != MVT::i64)
         ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
 
-      if (SrcVT != MVT::v8f16) {
+      if (SrcVT == MVT::v2f16 || SrcVT == MVT::v4f16) {
         SDValue Tmp =
             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
@@ -21628,6 +21633,22 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
       }
 
+      if (!HasVLX) {
+        assert(Subtarget.useAVX512Regs() && "Unexpected features!");
+        // Widen to 512-bits.
+        unsigned IntSize = EleVT.getSizeInBits();
+        unsigned Num = IntSize > 16 ? 512 / IntSize : 32;
+        MVT TmpVT = MVT::getVectorVT(MVT::f16, Num);
+        ResVT = MVT::getVectorVT(EleVT, Num);
+        // Need to concat with zero vector for strict fp to avoid spurious
+        // exceptions.
+        // TODO: Should we just do this for non-strict as well?
+        SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, TmpVT)
+                               : DAG.getUNDEF(TmpVT);
+        Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, TmpVT, Tmp, Src,
+                          DAG.getVectorIdxConstant(0, dl));
+      }
+
       if (IsStrict) {
         Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
                                    : X86ISD::STRICT_CVTTP2UI,
@@ -21640,7 +21661,8 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
 
       // TODO: Need to add exception check code for strict FP.
       if (EleVT.getSizeInBits() < 16) {
-        ResVT = MVT::getVectorVT(EleVT, 8);
+        if (HasVLX)
+          ResVT = MVT::getVectorVT(EleVT, 8);
         Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
       }
 
@@ -34123,12 +34145,10 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
       }
 
       if (IsStrict) {
-        Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
         Res =
             DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
         Chain = Res.getValue(1);
       } else {
-        Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
         Res = DAG.getNode(Opc, dl, ResVT, Src);
       }
 
@@ -44126,7 +44146,12 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
       // Conversions.
       // TODO: Add more CVT opcodes when we have test coverage.
     case X86ISD::CVTTP2SI:
-    case X86ISD::CVTTP2UI:
+    case X86ISD::CVTTP2UI: {
+      if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f16 &&
+          !Subtarget.hasVLX())
+        break;
+      [[fallthrough]];
+    }
     case X86ISD::CVTPH2PS: {
       SDLoc DL(Op);
       unsigned Scale = SizeInBits / ExtSizeInBits;
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128-fp16.ll
index 0a9dd78afb8cc..0126685f2bb32 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128-fp16.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512fp16,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16 -O3 | FileCheck %s --check-prefixes=NOVL
 
 declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half>, metadata)
 declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half>, metadata)
@@ -34,6 +35,16 @@ define <2 x i64> @strict_vector_fptosi_v2f16_to_v2i64(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; CHECK-NEXT:    vcvttph2qq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i64:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttsh2si %xmm0, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm1
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2si %xmm0, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm0
+; NOVL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; NOVL-NEXT:    retq
   %ret = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i64> %ret
@@ -46,6 +57,16 @@ define <2 x i64> @strict_vector_fptoui_v2f16_to_v2i64(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; CHECK-NEXT:    vcvttph2uqq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i64:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttsh2usi %xmm0, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm1
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2usi %xmm0, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm0
+; NOVL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; NOVL-NEXT:    retq
   %ret = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i64> %ret
@@ -58,6 +79,17 @@ define <2 x i32> @strict_vector_fptosi_v2f16_to_v2i32(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; CHECK-NEXT:    vcvttph2dq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i32:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; NOVL-NEXT:    vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i32> %ret
@@ -70,6 +102,17 @@ define <2 x i32> @strict_vector_fptoui_v2f16_to_v2i32(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; CHECK-NEXT:    vcvttph2udq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i32:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; NOVL-NEXT:    vcvttph2udq %ymm0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i32> %ret
@@ -82,6 +125,17 @@ define <2 x i16> @strict_vector_fptosi_v2f16_to_v2i16(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; CHECK-NEXT:    vcvttph2w %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i16:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i16> %ret
@@ -94,6 +148,17 @@ define <2 x i16> @strict_vector_fptoui_v2f16_to_v2i16(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; CHECK-NEXT:    vcvttph2uw %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i16:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i16> %ret
@@ -107,6 +172,17 @@ define <2 x i8> @strict_vector_fptosi_v2f16_to_v2i8(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vcvttph2w %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovwb %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i8:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i8> %ret
@@ -120,6 +196,17 @@ define <2 x i8> @strict_vector_fptoui_v2f16_to_v2i8(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vcvttph2uw %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovwb %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i8:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i8> %ret
@@ -136,6 +223,21 @@ define <2 x i1> @strict_vector_fptosi_v2f16_to_v2i1(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i1:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    andl $1, %eax
+; NOVL-NEXT:    kmovw %eax, %k0
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kshiftlw $1, %k1, %k1
+; NOVL-NEXT:    korw %k1, %k0, %k1
+; NOVL-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} {z} = -1
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i1> %ret
@@ -152,6 +254,21 @@ define <2 x i1> @strict_vector_fptoui_v2f16_to_v2i1(<2 x half> %a) #0 {
 ; CHECK-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i1:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    andl $1, %eax
+; NOVL-NEXT:    kmovw %eax, %k0
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kshiftlw $1, %k1, %k1
+; NOVL-NEXT:    korw %k1, %k0, %k1
+; NOVL-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} {z} = -1
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f16(<2 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <2 x i1> %ret
@@ -163,6 +280,21 @@ define <4 x i32> @strict_vector_fptosi_v4f16_to_v4i32(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT:    vcvttph2dq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v4f16_to_v4i32:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm1
+; NOVL-NEXT:    vcvttsh2si %xmm1, %eax
+; NOVL-NEXT:    vcvttsh2si %xmm0, %ecx
+; NOVL-NEXT:    vmovd %ecx, %xmm1
+; NOVL-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
+; NOVL-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; NOVL-NEXT:    vcvttsh2si %xmm2, %eax
+; NOVL-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; NOVL-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; NOVL-NEXT:    retq
   %ret = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i32> %ret
@@ -174,6 +306,21 @@ define <4 x i32> @strict_vector_fptoui_v4f16_to_v4i32(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT:    vcvttph2udq %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v4f16_to_v4i32:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm1
+; NOVL-NEXT:    vcvttsh2usi %xmm1, %eax
+; NOVL-NEXT:    vcvttsh2usi %xmm0, %ecx
+; NOVL-NEXT:    vmovd %ecx, %xmm1
+; NOVL-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
+; NOVL-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; NOVL-NEXT:    vcvttsh2usi %xmm2, %eax
+; NOVL-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; NOVL-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2usi %xmm0, %eax
+; NOVL-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; NOVL-NEXT:    retq
   %ret = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i32> %ret
@@ -185,6 +332,16 @@ define <4 x i16> @strict_vector_fptosi_v4f16_to_v4i16(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT:    vcvttph2w %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v4f16_to_v4i16:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinserti32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i16> %ret
@@ -196,6 +353,16 @@ define <4 x i16> @strict_vector_fptoui_v4f16_to_v4i16(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT:    vcvttph2uw %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v4f16_to_v4i16:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinserti32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i16> %ret
@@ -208,6 +375,16 @@ define <4 x i8> @strict_vector_fptosi_v4f16_to_v4i8(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vcvttph2w %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovwb %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v4f16_to_v4i8:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinserti32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i8> %ret
@@ -220,6 +397,16 @@ define <4 x i8> @strict_vector_fptoui_v4f16_to_v4i8(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vcvttph2uw %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovwb %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v4f16_to_v4i8:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinserti32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i8> %ret
@@ -235,6 +422,37 @@ define <4 x i1> @strict_vector_fptosi_v4f16_to_v4i1(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v4f16_to_v4i1:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    andl $1, %eax
+; NOVL-NEXT:    kmovw %eax, %k0
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm1
+; NOVL-NEXT:    vcvttsh2si %xmm1, %eax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kshiftlw $15, %k1, %k1
+; NOVL-NEXT:    kshiftrw $14, %k1, %k1
+; NOVL-NEXT:    korw %k1, %k0, %k0
+; NOVL-NEXT:    movw $-5, %ax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kandw %k1, %k0, %k0
+; NOVL-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; NOVL-NEXT:    vcvttsh2si %xmm1, %eax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kshiftlw $2, %k1, %k1
+; NOVL-NEXT:    korw %k1, %k0, %k0
+; NOVL-NEXT:    kshiftlw $13, %k0, %k0
+; NOVL-NEXT:    kshiftrw $13, %k0, %k0
+; NOVL-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kshiftlw $3, %k1, %k1
+; NOVL-NEXT:    korw %k1, %k0, %k1
+; NOVL-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i1> %ret
@@ -250,6 +468,37 @@ define <4 x i1> @strict_vector_fptoui_v4f16_to_v4i1(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v4f16_to_v4i1:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    andl $1, %eax
+; NOVL-NEXT:    kmovw %eax, %k0
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm1
+; NOVL-NEXT:    vcvttsh2si %xmm1, %eax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kshiftlw $15, %k1, %k1
+; NOVL-NEXT:    kshiftrw $14, %k1, %k1
+; NOVL-NEXT:    korw %k1, %k0, %k0
+; NOVL-NEXT:    movw $-5, %ax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kandw %k1, %k0, %k0
+; NOVL-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; NOVL-NEXT:    vcvttsh2si %xmm1, %eax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kshiftlw $2, %k1, %k1
+; NOVL-NEXT:    korw %k1, %k0, %k0
+; NOVL-NEXT:    kshiftlw $13, %k0, %k0
+; NOVL-NEXT:    kshiftrw $13, %k0, %k0
+; NOVL-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2si %xmm0, %eax
+; NOVL-NEXT:    kmovd %eax, %k1
+; NOVL-NEXT:    kshiftlw $3, %k1, %k1
+; NOVL-NEXT:    korw %k1, %k0, %k1
+; NOVL-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i1> %ret
@@ -260,6 +509,15 @@ define <8 x i16> @strict_vector_fptosi_v8f16_to_v8i16(<8 x half> %a) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttph2w %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v8f16_to_v8i16:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f16(<8 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <8 x i16> %ret
@@ -270,6 +528,15 @@ define <8 x i16> @strict_vector_fptoui_v8f16_to_v8i16(<8 x half> %a) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttph2uw %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v8f16_to_v8i16:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f16(<8 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <8 x i16> %ret
@@ -281,6 +548,15 @@ define <8 x i8> @strict_vector_fptosi_v8f16_to_v8i8(<8 x half> %a) #0 {
 ; CHECK-NEXT:    vcvttph2w %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovwb %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v8f16_to_v8i8:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f16(<8 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <8 x i8> %ret
@@ -292,6 +568,15 @@ define <8 x i8> @strict_vector_fptoui_v8f16_to_v8i8(<8 x half> %a) #0 {
 ; CHECK-NEXT:    vcvttph2uw %xmm0, %xmm0
 ; CHECK-NEXT:    vpmovwb %xmm0, %xmm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v8f16_to_v8i8:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f16(<8 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <8 x i8> %ret
@@ -305,6 +590,18 @@ define <8 x i1> @strict_vector_fptosi_v8f16_to_v8i1(<8 x half> %a) #0 {
 ; CHECK-NEXT:    vpmovm2w %k0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v8f16_to_v8i1:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; NOVL-NEXT:    vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; NOVL-NEXT:    vpmovm2w %k0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f16(<8 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <8 x i1> %ret
@@ -319,6 +616,19 @@ define <8 x i1> @strict_vector_fptoui_v8f16_to_v8i1(<8 x half> %a) #0 {
 ; CHECK-NEXT:    vpmovm2w %k0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v8f16_to_v8i1:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; NOVL-NEXT:    vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT:    vpslld $31, %ymm0, %ymm0
+; NOVL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; NOVL-NEXT:    vpmovm2w %k0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f16(<8 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <8 x i1> %ret
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-256-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-256-fp16.ll
index 7bdb6a45bebcc..a232122e9c707 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-256-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-256-fp16.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512fp16,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16 -O3 | FileCheck %s --check-prefixes=NOVL
 
 
 declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f16(<4 x half>, metadata)
@@ -20,6 +21,24 @@ define <4 x i64> @strict_vector_fptosi_v4f16_to_v4i64(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT:    vcvttph2qq %xmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v4f16_to_v4i64:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vpsrlq $48, %xmm0, %xmm1
+; NOVL-NEXT:    vcvttsh2si %xmm1, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm1
+; NOVL-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; NOVL-NEXT:    vcvttsh2si %xmm2, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm2
+; NOVL-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; NOVL-NEXT:    vcvttsh2si %xmm0, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm2
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2si %xmm0, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm0
+; NOVL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; NOVL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; NOVL-NEXT:    retq
   %ret = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i64> %ret
@@ -31,6 +50,24 @@ define <4 x i64> @strict_vector_fptoui_v4f16_to_v4i64(<4 x half> %a) #0 {
 ; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT:    vcvttph2uqq %xmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v4f16_to_v4i64:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vpsrlq $48, %xmm0, %xmm1
+; NOVL-NEXT:    vcvttsh2usi %xmm1, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm1
+; NOVL-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; NOVL-NEXT:    vcvttsh2usi %xmm2, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm2
+; NOVL-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; NOVL-NEXT:    vcvttsh2usi %xmm0, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm2
+; NOVL-NEXT:    vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT:    vcvttsh2usi %xmm0, %rax
+; NOVL-NEXT:    vmovq %rax, %xmm0
+; NOVL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; NOVL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; NOVL-NEXT:    retq
   %ret = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f16(<4 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <4 x i64> %ret
@@ -41,6 +78,15 @@ define <8 x i32> @strict_vector_fptosi_v8f16_to_v8i32(<8 x half> %a) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttph2dq %xmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v8f16_to_v8i32:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; NOVL-NEXT:    vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; NOVL-NEXT:    retq
   %ret = call <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f16(<8 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <8 x i32> %ret
@@ -51,6 +97,15 @@ define <8 x i32> @strict_vector_fptoui_v8f16_to_v8i32(<8 x half> %a) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttph2udq %xmm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v8f16_to_v8i32:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; NOVL-NEXT:    vcvttph2udq %ymm0, %zmm0
+; NOVL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; NOVL-NEXT:    retq
   %ret = call <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f16(<8 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <8 x i32> %ret
@@ -61,6 +116,14 @@ define <16 x i16> @strict_vector_fptosi_v16f16_to_v16i16(<16 x half> %a) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttph2w %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v16f16_to_v16i16:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; NOVL-NEXT:    retq
   %ret = call <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f16(<16 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <16 x i16> %ret
@@ -71,6 +134,14 @@ define <16 x i16> @strict_vector_fptoui_v16f16_to_v16i16(<16 x half> %a) #0 {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttph2uw %ymm0, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v16f16_to_v16i16:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
+; NOVL-NEXT:    vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; NOVL-NEXT:    retq
   %ret = call <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f16(<16 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <16 x i16> %ret
@@ -83,6 +154,13 @@ define <16 x i8> @strict_vector_fptosi_v16f16_to_v16i8(<16 x half> %a) #0 {
 ; CHECK-NEXT:    vpmovdb %zmm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v16f16_to_v16i8:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT:    vpmovdb %zmm0, %xmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f16(<16 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <16 x i8> %ret
@@ -95,6 +173,13 @@ define <16 x i8> @strict_vector_fptoui_v16f16_to_v16i8(<16 x half> %a) #0 {
 ; CHECK-NEXT:    vpmovdb %zmm0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v16f16_to_v16i8:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT:    vpmovdb %zmm0, %xmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f16(<16 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <16 x i8> %ret
@@ -108,6 +193,15 @@ define <16 x i1> @strict_vector_fptosi_v16f16_to_v16i1(<16 x half> %a) #0 {
 ; CHECK-NEXT:    vpmovm2b %k0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v16f16_to_v16i1:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; NOVL-NEXT:    vpmovm2b %k0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f16(<16 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <16 x i1> %ret
@@ -122,6 +216,16 @@ define <16 x i1> @strict_vector_fptoui_v16f16_to_v16i1(<16 x half> %a) #0 {
 ; CHECK-NEXT:    vpmovm2b %k0, %xmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v16f16_to_v16i1:
+; NOVL:       # %bb.0:
+; NOVL-NEXT:    vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT:    vpslld $31, %zmm0, %zmm0
+; NOVL-NEXT:    vptestmd %zmm0, %zmm0, %k0
+; NOVL-NEXT:    vpmovm2b %k0, %zmm0
+; NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT:    vzeroupper
+; NOVL-NEXT:    retq
   %ret = call <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f16(<16 x half> %a,
                                               metadata !"fpexcept.strict") #0
   ret <16 x i1> %ret



More information about the llvm-commits mailing list