[llvm] [X86] lowerFPToIntToFP - handle UI2FP on AVX512VL targets and i64 types on AVX512DQ targets (PR #162656)

Kavin Gnanapandithan via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 30 18:18:57 PDT 2025


https://github.com/KavinTheG updated https://github.com/llvm/llvm-project/pull/162656

From 8d2789fb1fc54c4793785b9dd9a63f5c09d875c7 Mon Sep 17 00:00:00 2001
From: Kavin Gnanapandithan <kavin.balag at gmail.com>
Date: Thu, 9 Oct 2025 08:41:07 -0400
Subject: [PATCH] [X86] Add optimizations for fp to signed & unsigned i64/i32
 conversions. (#160111)

Extends lowerFPToIntToFP to handle unsigned conversions and i64 types on
AVX512DQ+VL and AVX512DQ targets, and updates the affected test cases.
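
For illustration, this is the scalar round-trip pattern the lowering now
recognizes (the scvtf64_i64 case from fp-int-fp-cvt.ll below); on AVX512DQ+VL
it now selects a packed vcvttpd2qq/vcvtqq2pd pair instead of a
cvttsd2si/cvtsi2sd round trip through a GPR:

  define double @scvtf64_i64(double %a0) {
    %ii = fptosi double %a0 to i64   ; fp -> int
    %ff = sitofp i64 %ii to double   ; int -> fp, matched as a round trip
    ret double %ff
  }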
---
 llvm/lib/Target/X86/X86ISelLowering.cpp |  67 +++-
 llvm/test/CodeGen/X86/fp-int-fp-cvt.ll  | 154 ++++++--
 llvm/test/CodeGen/X86/isint.ll          | 451 ++++++++++++++++++++++--
 llvm/test/CodeGen/X86/setoeq.ll         |   8 +-
 4 files changed, 592 insertions(+), 88 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 49beadae63f03..812eb19387db2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -19920,7 +19920,9 @@ static SDValue lowerFPToIntToFP(SDValue CastToFP, const SDLoc &DL,
   // TODO: Allow FP_TO_UINT.
   SDValue CastToInt = CastToFP.getOperand(0);
   MVT VT = CastToFP.getSimpleValueType();
-  if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
+  if ((CastToInt.getOpcode() != ISD::FP_TO_SINT &&
+       CastToInt.getOpcode() != ISD::FP_TO_UINT) ||
+      VT.isVector())
     return SDValue();
 
   MVT IntVT = CastToInt.getSimpleValueType();
@@ -19932,22 +19934,60 @@ static SDValue lowerFPToIntToFP(SDValue CastToFP, const SDLoc &DL,
   // See if we have 128-bit vector cast instructions for this type of cast.
   // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
   if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
-      IntVT != MVT::i32)
+      (IntVT != MVT::i32 && IntVT != MVT::i64))
     return SDValue();
 
   unsigned SrcSize = SrcVT.getSizeInBits();
   unsigned IntSize = IntVT.getSizeInBits();
   unsigned VTSize = VT.getSizeInBits();
-  MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
-  MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
-  MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);
-
-  // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
-  unsigned ToIntOpcode =
-      SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
-  unsigned ToFPOpcode =
-      IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
-
+  unsigned ToIntOpcode, ToFPOpcode;
+  unsigned Width = 128;
+  bool IsUnsigned = CastToInt.getOpcode() == ISD::FP_TO_UINT;
+
+  if (Subtarget.hasVLX() && Subtarget.hasDQI()) {
+    // AVX512DQ + AVX512VL
+    if (IsUnsigned) {
+      ToIntOpcode =
+          SrcSize != IntSize ? X86ISD::CVTTP2UI : (unsigned)ISD::FP_TO_UINT;
+      ToFPOpcode =
+          IntSize != VTSize ? X86ISD::CVTUI2P : (unsigned)ISD::UINT_TO_FP;
+    } else {
+      ToIntOpcode =
+          SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
+      ToFPOpcode =
+          IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
+    }
+  } else if (Subtarget.hasDQI()) {
+    // AVX512DQ without AVX512VL only has 512-bit conversions; widen to 512.
+    Width = 512;
+    ToIntOpcode = CastToInt.getOpcode();
+    ToFPOpcode = IsUnsigned ? ISD::UINT_TO_FP : ISD::SINT_TO_FP;
+  } else {
+    // SSE2 only supports signed f64/f32 <-> i32 conversions.
+    if (IsUnsigned || IntVT == MVT::i64)
+      return SDValue();
+    ToIntOpcode =
+        SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
+    ToFPOpcode =
+        IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
+  }
+
+  MVT VecSrcVT, VecIntVT, VecVT;
+  unsigned NumElts;
+  unsigned SrcElts, VTElts;
+  // Some conversions on AVX512DQ are only legal when the vectors use the same element count.
+  if (Width == 512) {
+    NumElts = std::min(Width / IntSize, Width / SrcSize);
+    SrcElts = NumElts;
+    VTElts = NumElts;
+  } else {
+    NumElts = Width / IntSize;
+    SrcElts = Width / SrcSize;
+    VTElts = Width / VTSize;
+  }
+  VecIntVT = MVT::getVectorVT(IntVT, NumElts);
+  VecSrcVT = MVT::getVectorVT(SrcVT, SrcElts);
+  VecVT = MVT::getVectorVT(VT, VTElts);
   // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
   //
   // We are not defining the high elements (for example, zero them) because
@@ -20618,6 +20658,9 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
   else if (isLegalConversion(SrcVT, DstVT, false, Subtarget))
     return Op;
 
+  if (SDValue V = lowerFPToIntToFP(Op, dl, DAG, Subtarget))
+    return V;
+
   if (DstVT.isVector())
     return lowerUINT_TO_FP_vec(Op, dl, DAG, Subtarget);
 
diff --git a/llvm/test/CodeGen/X86/fp-int-fp-cvt.ll b/llvm/test/CodeGen/X86/fp-int-fp-cvt.ll
index b6c17cecffbd6..9f82140399bef 100644
--- a/llvm/test/CodeGen/X86/fp-int-fp-cvt.ll
+++ b/llvm/test/CodeGen/X86/fp-int-fp-cvt.ll
@@ -16,11 +16,26 @@ define double @scvtf64_i32(double %a0) {
 ; SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: scvtf64_i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcvttpd2dq %xmm0, %xmm0
-; AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX2-LABEL: scvtf64_i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vcvttpd2dq %xmm0, %xmm0
+; AVX2-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-VL-LABEL: scvtf64_i32:
+; AVX512-VL:       # %bb.0:
+; AVX512-VL-NEXT:    vcvttpd2dq %xmm0, %xmm0
+; AVX512-VL-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; AVX512-VL-NEXT:    retq
+;
+; AVX512-NOVL-LABEL: scvtf64_i32:
+; AVX512-NOVL:       # %bb.0:
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512-NOVL-NEXT:    vcvttpd2dq %zmm0, %ymm0
+; AVX512-NOVL-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NOVL-NEXT:    vzeroupper
+; AVX512-NOVL-NEXT:    retq
   %ii = fptosi double %a0 to i32
   %ff = sitofp i32 %ii to double
   ret double %ff
@@ -34,11 +49,26 @@ define double @scvtf64_i64(double %a0) {
 ; SSE-NEXT:    cvtsi2sd %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: scvtf64_i64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcvttsd2si %xmm0, %rax
-; AVX-NEXT:    vcvtsi2sd %rax, %xmm15, %xmm0
-; AVX-NEXT:    retq
+; AVX2-LABEL: scvtf64_i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vcvttsd2si %xmm0, %rax
+; AVX2-NEXT:    vcvtsi2sd %rax, %xmm15, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-VL-LABEL: scvtf64_i64:
+; AVX512-VL:       # %bb.0:
+; AVX512-VL-NEXT:    vcvttpd2qq %xmm0, %xmm0
+; AVX512-VL-NEXT:    vcvtqq2pd %xmm0, %xmm0
+; AVX512-VL-NEXT:    retq
+;
+; AVX512-NOVL-LABEL: scvtf64_i64:
+; AVX512-NOVL:       # %bb.0:
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512-NOVL-NEXT:    vcvttpd2qq %zmm0, %zmm0
+; AVX512-NOVL-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NOVL-NEXT:    vzeroupper
+; AVX512-NOVL-NEXT:    retq
   %ii = fptosi double %a0 to i64
   %ff = sitofp i64 %ii to double
   ret double %ff
@@ -69,11 +99,26 @@ define float @scvtf32_i64(float %a0) {
 ; SSE-NEXT:    cvtsi2ss %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: scvtf32_i64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcvttss2si %xmm0, %rax
-; AVX-NEXT:    vcvtsi2ss %rax, %xmm15, %xmm0
-; AVX-NEXT:    retq
+; AVX2-LABEL: scvtf32_i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vcvttss2si %xmm0, %rax
+; AVX2-NEXT:    vcvtsi2ss %rax, %xmm15, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-VL-LABEL: scvtf32_i64:
+; AVX512-VL:       # %bb.0:
+; AVX512-VL-NEXT:    vcvttps2qq %xmm0, %xmm0
+; AVX512-VL-NEXT:    vcvtqq2ps %xmm0, %xmm0
+; AVX512-VL-NEXT:    retq
+;
+; AVX512-NOVL-LABEL: scvtf32_i64:
+; AVX512-NOVL:       # %bb.0:
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NOVL-NEXT:    vcvttps2qq %ymm0, %zmm0
+; AVX512-NOVL-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NOVL-NEXT:    vzeroupper
+; AVX512-NOVL-NEXT:    retq
   %ii = fptosi float %a0 to i64
   %ff = sitofp i64 %ii to float
   ret float %ff
@@ -99,11 +144,20 @@ define double @ucvtf64_i32(double %a0) {
 ; AVX2-NEXT:    vcvtsi2sd %rax, %xmm15, %xmm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: ucvtf64_i32:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcvttsd2usi %xmm0, %eax
-; AVX512-NEXT:    vcvtusi2sd %eax, %xmm15, %xmm0
-; AVX512-NEXT:    retq
+; AVX512-VL-LABEL: ucvtf64_i32:
+; AVX512-VL:       # %bb.0:
+; AVX512-VL-NEXT:    vcvttpd2udq %xmm0, %xmm0
+; AVX512-VL-NEXT:    vcvtudq2pd %xmm0, %xmm0
+; AVX512-VL-NEXT:    retq
+;
+; AVX512-NOVL-LABEL: ucvtf64_i32:
+; AVX512-NOVL:       # %bb.0:
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512-NOVL-NEXT:    vcvttpd2udq %zmm0, %ymm0
+; AVX512-NOVL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NOVL-NEXT:    vzeroupper
+; AVX512-NOVL-NEXT:    retq
   %ii = fptoui double %a0 to i32
   %ff = uitofp i32 %ii to double
   ret double %ff
@@ -143,11 +197,20 @@ define double @ucvtf64_i64(double %a0) {
 ; AVX2-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: ucvtf64_i64:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcvttsd2usi %xmm0, %rax
-; AVX512-NEXT:    vcvtusi2sd %rax, %xmm15, %xmm0
-; AVX512-NEXT:    retq
+; AVX512-VL-LABEL: ucvtf64_i64:
+; AVX512-VL:       # %bb.0:
+; AVX512-VL-NEXT:    vcvttpd2uqq %xmm0, %xmm0
+; AVX512-VL-NEXT:    vcvtuqq2pd %xmm0, %xmm0
+; AVX512-VL-NEXT:    retq
+;
+; AVX512-NOVL-LABEL: ucvtf64_i64:
+; AVX512-NOVL:       # %bb.0:
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512-NOVL-NEXT:    vcvttpd2uqq %zmm0, %zmm0
+; AVX512-NOVL-NEXT:    vcvtuqq2pd %zmm0, %zmm0
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NOVL-NEXT:    vzeroupper
+; AVX512-NOVL-NEXT:    retq
   %ii = fptoui double %a0 to i64
   %ff = uitofp i64 %ii to double
   ret double %ff
@@ -169,11 +232,20 @@ define float @ucvtf32_i32(float %a0) {
 ; AVX2-NEXT:    vcvtsi2ss %rax, %xmm15, %xmm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: ucvtf32_i32:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcvttss2usi %xmm0, %eax
-; AVX512-NEXT:    vcvtusi2ss %eax, %xmm15, %xmm0
-; AVX512-NEXT:    retq
+; AVX512-VL-LABEL: ucvtf32_i32:
+; AVX512-VL:       # %bb.0:
+; AVX512-VL-NEXT:    vcvttps2udq %xmm0, %xmm0
+; AVX512-VL-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; AVX512-VL-NEXT:    retq
+;
+; AVX512-NOVL-LABEL: ucvtf32_i32:
+; AVX512-NOVL:       # %bb.0:
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512-NOVL-NEXT:    vcvttps2udq %zmm0, %zmm0
+; AVX512-NOVL-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512-NOVL-NEXT:    vzeroupper
+; AVX512-NOVL-NEXT:    retq
   %ii = fptoui float %a0 to i32
   %ff = uitofp i32 %ii to float
   ret float %ff
@@ -226,15 +298,23 @@ define float @ucvtf32_i64(float %a0) {
 ; AVX2-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: ucvtf32_i64:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcvttss2usi %xmm0, %rax
-; AVX512-NEXT:    vcvtusi2ss %rax, %xmm15, %xmm0
-; AVX512-NEXT:    retq
+; AVX512-VL-LABEL: ucvtf32_i64:
+; AVX512-VL:       # %bb.0:
+; AVX512-VL-NEXT:    vcvttps2uqq %xmm0, %xmm0
+; AVX512-VL-NEXT:    vcvtuqq2ps %xmm0, %xmm0
+; AVX512-VL-NEXT:    retq
+;
+; AVX512-NOVL-LABEL: ucvtf32_i64:
+; AVX512-NOVL:       # %bb.0:
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NOVL-NEXT:    vcvttps2uqq %ymm0, %zmm0
+; AVX512-NOVL-NEXT:    vcvtuqq2ps %zmm0, %ymm0
+; AVX512-NOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NOVL-NEXT:    vzeroupper
+; AVX512-NOVL-NEXT:    retq
   %ii = fptoui float %a0 to i64
   %ff = uitofp i64 %ii to float
   ret float %ff
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; AVX512-NOVL: {{.*}}
-; AVX512-VL: {{.*}}
+; AVX512: {{.*}}
diff --git a/llvm/test/CodeGen/X86/isint.ll b/llvm/test/CodeGen/X86/isint.ll
index 8c11fe147f0d8..8765b1faaa972 100644
--- a/llvm/test/CodeGen/X86/isint.ll
+++ b/llvm/test/CodeGen/X86/isint.ll
@@ -1,19 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X64 %s
+; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=SSE2 %s
 ; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X86 %s
-
+; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck -check-prefix=AVX512VL %s
 ; PR19059
 
 define i32 @isint_return(double %d) nounwind {
-; X64-LABEL: isint_return:
-; X64:       # %bb.0:
-; X64-NEXT:    cvttpd2dq %xmm0, %xmm1
-; X64-NEXT:    cvtdq2pd %xmm1, %xmm1
-; X64-NEXT:    cmpeqsd %xmm0, %xmm1
-; X64-NEXT:    movq %xmm1, %rax
-; X64-NEXT:    andl $1, %eax
-; X64-NEXT:    # kill: def $eax killed $eax killed $rax
-; X64-NEXT:    retq
+; SSE2-LABEL: isint_return:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttpd2dq %xmm0, %xmm1
+; SSE2-NEXT:    cvtdq2pd %xmm1, %xmm1
+; SSE2-NEXT:    cmpeqsd %xmm0, %xmm1
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    # kill: def $eax killed $eax killed $rax
+; SSE2-NEXT:    retq
 ;
 ; X86-LABEL: isint_return:
 ; X86:       # %bb.0:
@@ -24,6 +24,14 @@ define i32 @isint_return(double %d) nounwind {
 ; X86-NEXT:    movd %xmm1, %eax
 ; X86-NEXT:    andl $1, %eax
 ; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isint_return:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttpd2dq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtdq2pd %xmm1, %xmm1
+; AVX512VL-NEXT:    vcmpeqsd %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    retq
   %i = fptosi double %d to i32
   %e = sitofp i32 %i to double
   %c = fcmp oeq double %d, %e
@@ -32,14 +40,14 @@ define i32 @isint_return(double %d) nounwind {
 }
 
 define i32 @isint_float_return(float %f) nounwind {
-; X64-LABEL: isint_float_return:
-; X64:       # %bb.0:
-; X64-NEXT:    cvttps2dq %xmm0, %xmm1
-; X64-NEXT:    cvtdq2ps %xmm1, %xmm1
-; X64-NEXT:    cmpeqss %xmm0, %xmm1
-; X64-NEXT:    movd %xmm1, %eax
-; X64-NEXT:    andl $1, %eax
-; X64-NEXT:    retq
+; SSE2-LABEL: isint_float_return:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttps2dq %xmm0, %xmm1
+; SSE2-NEXT:    cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT:    cmpeqss %xmm0, %xmm1
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    retq
 ;
 ; X86-LABEL: isint_float_return:
 ; X86:       # %bb.0:
@@ -50,6 +58,14 @@ define i32 @isint_float_return(float %f) nounwind {
 ; X86-NEXT:    movd %xmm1, %eax
 ; X86-NEXT:    andl $1, %eax
 ; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isint_float_return:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttps2dq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtdq2ps %xmm1, %xmm1
+; AVX512VL-NEXT:    vcmpeqss %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    retq
   %i = fptosi float %f to i32
   %g = sitofp i32 %i to float
   %c = fcmp oeq float %f, %g
@@ -57,22 +73,373 @@ define i32 @isint_float_return(float %f) nounwind {
   ret i32 %z
 }
 
+define i64 @isint64_float_return(float %f) nounwind {
+; SSE2-LABEL: isint64_float_return:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttss2si %xmm0, %rax
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
+; SSE2-NEXT:    cmpeqss %xmm0, %xmm1
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    retq
+;
+; X86-LABEL: isint64_float_return:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $32, %esp
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    movss %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl $3072, %eax # imm = 0xC00
+; X86-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X86-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X86-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X86-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X86-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    movlps %xmm1, {{[0-9]+}}(%esp)
+; X86-NEXT:    fildll {{[0-9]+}}(%esp)
+; X86-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-NEXT:    cmpeqss {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movd %xmm0, %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isint64_float_return:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttps2qq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtqq2ps %xmm1, %xmm1
+; AVX512VL-NEXT:    vcmpeqss %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    retq
+  %i = fptosi float %f to i64
+  %g = sitofp i64 %i to float
+  %c = fcmp oeq float %f, %g
+  %z = zext i1 %c to i64
+  ret i64 %z
+}
+
+define i64 @isint64_return(double %d) nounwind {
+; SSE2-LABEL: isint64_return:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttsd2si %xmm0, %rax
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
+; SSE2-NEXT:    cmpeqsd %xmm0, %xmm1
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    retq
+;
+; X86-LABEL: isint64_return:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $32, %esp
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT:    fldl {{[0-9]+}}(%esp)
+; X86-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl $3072, %eax # imm = 0xC00
+; X86-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X86-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X86-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X86-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X86-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    movlps %xmm1, {{[0-9]+}}(%esp)
+; X86-NEXT:    fildll {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
+; X86-NEXT:    cmpeqsd {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movd %xmm0, %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isint64_return:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttpd2qq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtqq2pd %xmm1, %xmm1
+; AVX512VL-NEXT:    vcmpeqsd %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    retq
+  %i = fptosi double %d to i64
+  %g = sitofp i64 %i to double
+  %c = fcmp oeq double %d, %g
+  %z = zext i1 %c to i64
+  ret i64 %z
+}
+
+define i32 @isuint_return(double %d) nounwind {
+; SSE2-LABEL: isuint_return:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttsd2si %xmm0, %rax
+; SSE2-NEXT:    movl %eax, %eax
+; SSE2-NEXT:    cvtsi2sd %rax, %xmm1
+; SSE2-NEXT:    cmpeqsd %xmm0, %xmm1
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    # kill: def $eax killed $eax killed $rax
+; SSE2-NEXT:    retq
+;
+; X86-LABEL: isuint_return:
+; X86:       # %bb.0:
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    cvttsd2si %xmm0, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    sarl $31, %ecx
+; X86-NEXT:    movapd %xmm0, %xmm1
+; X86-NEXT:    subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    cvttsd2si %xmm1, %edx
+; X86-NEXT:    andl %ecx, %edx
+; X86-NEXT:    orl %eax, %edx
+; X86-NEXT:    movd %edx, %xmm1
+; X86-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    cmpeqsd %xmm0, %xmm1
+; X86-NEXT:    movd %xmm1, %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isuint_return:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttpd2udq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtudq2pd %xmm1, %xmm1
+; AVX512VL-NEXT:    vcmpeqsd %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    retq
+  %i = fptoui double %d to i32
+  %e = uitofp i32 %i to double
+  %c = fcmp oeq double %d, %e
+  %z = zext i1 %c to i32
+  ret i32 %z
+}
+
+define i32 @isuint_float_return(float %f) nounwind {
+; SSE2-LABEL: isuint_float_return:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttps2dq %xmm0, %xmm1
+; SSE2-NEXT:    cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT:    cmpeqss %xmm0, %xmm1
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    retq
+;
+; X86-LABEL: isuint_float_return:
+; X86:       # %bb.0:
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    cvttps2dq %xmm0, %xmm1
+; X86-NEXT:    cvtdq2ps %xmm1, %xmm1
+; X86-NEXT:    cmpeqss %xmm0, %xmm1
+; X86-NEXT:    movd %xmm1, %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isuint_float_return:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttps2dq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtdq2ps %xmm1, %xmm1
+; AVX512VL-NEXT:    vcmpeqss %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    retq
+  %i = fptosi float %f to i32
+  %g = sitofp i32 %i to float
+  %c = fcmp oeq float %f, %g
+  %z = zext i1 %c to i32
+  ret i32 %z
+}
+
+define i64 @isuint64_return(double %d) nounwind {
+; SSE2-LABEL: isuint64_return:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttsd2si %xmm0, %rax
+; SSE2-NEXT:    movq %rax, %rcx
+; SSE2-NEXT:    sarq $63, %rcx
+; SSE2-NEXT:    movapd %xmm0, %xmm1
+; SSE2-NEXT:    subsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT:    cvttsd2si %xmm1, %rdx
+; SSE2-NEXT:    andq %rcx, %rdx
+; SSE2-NEXT:    orq %rax, %rdx
+; SSE2-NEXT:    movq %rdx, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; SSE2-NEXT:    subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT:    movapd %xmm1, %xmm2
+; SSE2-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT:    addsd %xmm1, %xmm2
+; SSE2-NEXT:    cmpeqsd %xmm0, %xmm2
+; SSE2-NEXT:    movq %xmm2, %rax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    retq
+;
+; X86-LABEL: isuint64_return:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
+; X86-NEXT:    ucomisd %xmm0, %xmm1
+; X86-NEXT:    jbe .LBB6_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    xorpd %xmm1, %xmm1
+; X86-NEXT:  .LBB6_2:
+; X86-NEXT:    movapd %xmm0, %xmm2
+; X86-NEXT:    subsd %xmm1, %xmm2
+; X86-NEXT:    movsd %xmm2, {{[0-9]+}}(%esp)
+; X86-NEXT:    setbe %al
+; X86-NEXT:    fldl {{[0-9]+}}(%esp)
+; X86-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    orl $3072, %ecx # imm = 0xC00
+; X86-NEXT:    movw %cx, {{[0-9]+}}(%esp)
+; X86-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X86-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X86-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    shll $31, %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movd %eax, %xmm1
+; X86-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
+; X86-NEXT:    subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-NEXT:    movapd %xmm2, %xmm1
+; X86-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; X86-NEXT:    addsd %xmm2, %xmm1
+; X86-NEXT:    cmpeqsd %xmm0, %xmm1
+; X86-NEXT:    movd %xmm1, %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isuint64_return:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttpd2uqq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtuqq2pd %xmm1, %xmm1
+; AVX512VL-NEXT:    vcmpeqsd %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    retq
+  %i = fptoui double %d to i64
+  %e = uitofp i64 %i to double
+  %c = fcmp oeq double %d, %e
+  %z = zext i1 %c to i64
+  ret i64 %z
+}
+
+define i64 @isuint64_float_return(float %f) nounwind {
+; SSE2-LABEL: isuint64_float_return:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttss2si %xmm0, %rcx
+; SSE2-NEXT:    movq %rcx, %rdx
+; SSE2-NEXT:    sarq $63, %rdx
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT:    cvttss2si %xmm1, %rax
+; SSE2-NEXT:    andq %rdx, %rax
+; SSE2-NEXT:    orq %rcx, %rax
+; SSE2-NEXT:    js .LBB7_1
+; SSE2-NEXT:  # %bb.2:
+; SSE2-NEXT:    xorps %xmm1, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
+; SSE2-NEXT:    jmp .LBB7_3
+; SSE2-NEXT:  .LBB7_1:
+; SSE2-NEXT:    movq %rax, %rcx
+; SSE2-NEXT:    shrq %rcx
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    orq %rcx, %rax
+; SSE2-NEXT:    xorps %xmm1, %xmm1
+; SSE2-NEXT:    cvtsi2ss %rax, %xmm1
+; SSE2-NEXT:    addss %xmm1, %xmm1
+; SSE2-NEXT:  .LBB7_3:
+; SSE2-NEXT:    cmpeqss %xmm1, %xmm0
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    retq
+;
+; X86-LABEL: isuint64_float_return:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $32, %esp
+; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    movss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
+; X86-NEXT:    ucomiss %xmm0, %xmm1
+; X86-NEXT:    jbe .LBB7_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    xorps %xmm1, %xmm1
+; X86-NEXT:  .LBB7_2:
+; X86-NEXT:    movaps %xmm0, %xmm2
+; X86-NEXT:    subss %xmm1, %xmm2
+; X86-NEXT:    movss %xmm2, {{[0-9]+}}(%esp)
+; X86-NEXT:    setbe %al
+; X86-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    orl $3072, %ecx # imm = 0xC00
+; X86-NEXT:    movw %cx, {{[0-9]+}}(%esp)
+; X86-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X86-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X86-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    shll $31, %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movd %eax, %xmm1
+; X86-NEXT:    movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-NEXT:    movq %xmm2, {{[0-9]+}}(%esp)
+; X86-NEXT:    shrl $31, %eax
+; X86-NEXT:    fildll {{[0-9]+}}(%esp)
+; X86-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
+; X86-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-NEXT:    cmpeqss {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movd %xmm0, %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isuint64_float_return:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttps2uqq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtuqq2ps %xmm1, %xmm1
+; AVX512VL-NEXT:    vcmpeqss %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    retq
+  %i = fptoui float %f to i64
+  %g = uitofp i64 %i to float
+  %c = fcmp oeq float %f, %g
+  %z = zext i1 %c to i64
+  ret i64 %z
+}
+
 declare void @foo()
 
 define void @isint_branch(double %d) nounwind {
-; X64-LABEL: isint_branch:
-; X64:       # %bb.0:
-; X64-NEXT:    cvttpd2dq %xmm0, %xmm1
-; X64-NEXT:    cvtdq2pd %xmm1, %xmm1
-; X64-NEXT:    ucomisd %xmm1, %xmm0
-; X64-NEXT:    jne .LBB2_2
-; X64-NEXT:    jp .LBB2_2
-; X64-NEXT:  # %bb.1: # %true
-; X64-NEXT:    pushq %rax
-; X64-NEXT:    callq foo at PLT
-; X64-NEXT:    popq %rax
-; X64-NEXT:  .LBB2_2: # %false
-; X64-NEXT:    retq
+; SSE2-LABEL: isint_branch:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttpd2dq %xmm0, %xmm1
+; SSE2-NEXT:    cvtdq2pd %xmm1, %xmm1
+; SSE2-NEXT:    ucomisd %xmm1, %xmm0
+; SSE2-NEXT:    jne .LBB8_2
+; SSE2-NEXT:    jp .LBB8_2
+; SSE2-NEXT:  # %bb.1: # %true
+; SSE2-NEXT:    pushq %rax
+; SSE2-NEXT:    callq foo at PLT
+; SSE2-NEXT:    popq %rax
+; SSE2-NEXT:  .LBB8_2: # %false
+; SSE2-NEXT:    retq
 ;
 ; X86-LABEL: isint_branch:
 ; X86:       # %bb.0:
@@ -80,12 +447,26 @@ define void @isint_branch(double %d) nounwind {
 ; X86-NEXT:    cvttpd2dq %xmm0, %xmm1
 ; X86-NEXT:    cvtdq2pd %xmm1, %xmm1
 ; X86-NEXT:    ucomisd %xmm1, %xmm0
-; X86-NEXT:    jne .LBB2_2
-; X86-NEXT:    jp .LBB2_2
+; X86-NEXT:    jne .LBB8_2
+; X86-NEXT:    jp .LBB8_2
 ; X86-NEXT:  # %bb.1: # %true
 ; X86-NEXT:    calll foo at PLT
-; X86-NEXT:  .LBB2_2: # %false
+; X86-NEXT:  .LBB8_2: # %false
 ; X86-NEXT:    retl
+;
+; AVX512VL-LABEL: isint_branch:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vcvttpd2dq %xmm0, %xmm1
+; AVX512VL-NEXT:    vcvtdq2pd %xmm1, %xmm1
+; AVX512VL-NEXT:    vucomisd %xmm1, %xmm0
+; AVX512VL-NEXT:    jne .LBB8_2
+; AVX512VL-NEXT:    jp .LBB8_2
+; AVX512VL-NEXT:  # %bb.1: # %true
+; AVX512VL-NEXT:    pushq %rax
+; AVX512VL-NEXT:    callq foo at PLT
+; AVX512VL-NEXT:    popq %rax
+; AVX512VL-NEXT:  .LBB8_2: # %false
+; AVX512VL-NEXT:    retq
   %i = fptosi double %d to i32
   %e = sitofp i32 %i to double
   %c = fcmp oeq double %d, %e
diff --git a/llvm/test/CodeGen/X86/setoeq.ll b/llvm/test/CodeGen/X86/setoeq.ll
index 131e279aa645c..87317879abbc8 100644
--- a/llvm/test/CodeGen/X86/setoeq.ll
+++ b/llvm/test/CodeGen/X86/setoeq.ll
@@ -86,8 +86,8 @@ define zeroext i8 @oeq_f64_u32(double %x) nounwind readnone {
 ; AVX512-LABEL: oeq_f64_u32:
 ; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512-NEXT:    vcvttsd2usi %xmm0, %eax
-; AVX512-NEXT:    vcvtusi2sd %eax, %xmm7, %xmm1
+; AVX512-NEXT:    vcvttpd2udq %xmm0, %xmm1
+; AVX512-NEXT:    vcvtudq2pd %xmm1, %xmm1
 ; AVX512-NEXT:    vcmpeqsd %xmm0, %xmm1, %k0
 ; AVX512-NEXT:    kmovd %k0, %eax
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax
@@ -350,8 +350,8 @@ define zeroext i8 @une_f64_u32(double %x) nounwind readnone {
 ; AVX512-LABEL: une_f64_u32:
 ; AVX512:       # %bb.0: # %entry
 ; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512-NEXT:    vcvttsd2usi %xmm0, %eax
-; AVX512-NEXT:    vcvtusi2sd %eax, %xmm7, %xmm1
+; AVX512-NEXT:    vcvttpd2udq %xmm0, %xmm1
+; AVX512-NEXT:    vcvtudq2pd %xmm1, %xmm1
 ; AVX512-NEXT:    vcmpneqsd %xmm0, %xmm1, %k0
 ; AVX512-NEXT:    kmovd %k0, %eax
 ; AVX512-NEXT:    # kill: def $al killed $al killed $eax


