[llvm] 1692dff - Revert "[X86] Avoid converting u64 to f32 using x87 on Windows"

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 19 21:36:20 PST 2023


Author: Craig Topper
Date: 2023-01-19T21:36:07-08:00
New Revision: 1692dff0b33c840447446064d56ad06ba694665e

URL: https://github.com/llvm/llvm-project/commit/1692dff0b33c840447446064d56ad06ba694665e
DIFF: https://github.com/llvm/llvm-project/commit/1692dff0b33c840447446064d56ad06ba694665e.diff

LOG: Revert "[X86] Avoid converting u64 to f32 using x87 on Windows"

This reverts commit a6e3027db7ebe6863e44bafcfeaacc16bdc88a3f.

Chrome and Halide are both reporting issues with importing builtins.

Maybe the better direction is to manually adjust FPCW for the inline
sequence on Windows.
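
For reference, a rough source-level sketch of what "manually adjust FPCW" could mean: raise the x87 precision control to 64-bit around the conversion so an x87 FILD/FADD sequence does not round intermediates at 53 bits. This uses the MSVC CRT's _controlfp_s and only applies to 32-bit x86 (precision control is not adjustable on x64); the function name and structure are illustrative only, not the proposed in-compiler fix, which would presumably save/restore FPCW with FNSTCW/FLDCW around the inlined sequence.

    #include <cstdint>
    #include <float.h>   // _controlfp_s, _PC_64, _MCW_PC (MSVC CRT)

    // Sketch only (MSVC, 32-bit x86): run a u64 -> f32 conversion with the
    // x87 precision control forced to 64-bit, then restore the old setting.
    float u64_to_f32_with_pc64(uint64_t v) {
      unsigned int saved = 0;
      _controlfp_s(&saved, 0, 0);            // query the current control word
      unsigned int tmp = 0;
      _controlfp_s(&tmp, _PC_64, _MCW_PC);   // precision control = 64-bit
      float f = static_cast<float>(v);       // the conversion; an x87 FILD/FADD
                                             // path, when used, now runs at
                                             // 64-bit precision
      _controlfp_s(&tmp, saved, _MCW_PC);    // restore the original setting
      return f;
    }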

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/uint64-to-float.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index fb68a9c0056d9..c88c66d8b2edc 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -21898,13 +21898,6 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
   }
 
   assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
-
-  // On Windows, the default precision control on x87 is only 53-bit, and FADD
-  // triggers rounding with that precision, so the final result may be less
-  // accurate. 18014397972611071 is one such case.
-  if (Subtarget.isOSWindows())
-    return SDValue();
-
   SDValue ValueToStore = Src;
   if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
     // Bitcasting to f64 here allows us to do a single 64-bit store from
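
The comment removed above cites 18014397972611071, which is 2^54 - 2^29 - 1. A standalone host-level sketch of the double rounding that 53-bit precision introduces: rounding that value to 53 bits first and then to float lands on 2^54, while a single correctly rounded conversion gives 2^54 - 2^30. The intermediate cast to double below stands in for the x87 FADD under the Windows default precision control; this is an illustration, not LLVM code, and assumes round-to-nearest-even and a correctly rounded u64 -> f32 conversion on the host.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Value called out in the removed comment: 2^54 - 2^29 - 1.
      const uint64_t v = 18014397972611071ULL;

      // Single rounding straight to float.
      float once = static_cast<float>(v);                        // 2^54 - 2^30

      // Round to 53-bit (double) precision first, then to float.
      float twice = static_cast<float>(static_cast<double>(v));  // 2^54

      std::printf("single rounding: %.1f\ndouble rounding: %.1f\n", once, twice);
      return 0;
    }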

diff --git a/llvm/test/CodeGen/X86/uint64-to-float.ll b/llvm/test/CodeGen/X86/uint64-to-float.ll
index f44986b2582be..8b6623476ebaa 100644
--- a/llvm/test/CodeGen/X86/uint64-to-float.ll
+++ b/llvm/test/CodeGen/X86/uint64-to-float.ll
@@ -1,8 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=i686-windows -mattr=+sse2 | FileCheck %s --check-prefix=X86-WIN
-; RUN: llc < %s -mtriple=x86_64-windows -mattr=+sse2 | FileCheck %s --check-prefix=X64-WIN
+; RUN: llc < %s -mtriple=i686-apple-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-apple-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; Verify that we are using the efficient uitofp --> sitofp lowering illustrated
 ; by the compiler_rt implementation of __floatundisf.
@@ -44,36 +42,6 @@ define float @test(i64 %a) nounwind {
 ; X64-NEXT:    cvtsi2ss %rdi, %xmm0
 ; X64-NEXT:    addss %xmm0, %xmm0
 ; X64-NEXT:    retq
-;
-; X86-WIN-LABEL: test:
-; X86-WIN:       # %bb.0: # %entry
-; X86-WIN-NEXT:    pushl %ebp
-; X86-WIN-NEXT:    movl %esp, %ebp
-; X86-WIN-NEXT:    andl $-8, %esp
-; X86-WIN-NEXT:    subl $8, %esp
-; X86-WIN-NEXT:    pushl 12(%ebp)
-; X86-WIN-NEXT:    pushl 8(%ebp)
-; X86-WIN-NEXT:    calll ___floatundisf
-; X86-WIN-NEXT:    addl $8, %esp
-; X86-WIN-NEXT:    movl %ebp, %esp
-; X86-WIN-NEXT:    popl %ebp
-; X86-WIN-NEXT:    retl
-;
-; X64-WIN-LABEL: test:
-; X64-WIN:       # %bb.0: # %entry
-; X64-WIN-NEXT:    testq %rcx, %rcx
-; X64-WIN-NEXT:    js .LBB0_1
-; X64-WIN-NEXT:  # %bb.2: # %entry
-; X64-WIN-NEXT:    cvtsi2ss %rcx, %xmm0
-; X64-WIN-NEXT:    retq
-; X64-WIN-NEXT:  .LBB0_1:
-; X64-WIN-NEXT:    movq %rcx, %rax
-; X64-WIN-NEXT:    shrq %rax
-; X64-WIN-NEXT:    andl $1, %ecx
-; X64-WIN-NEXT:    orq %rax, %rcx
-; X64-WIN-NEXT:    cvtsi2ss %rcx, %xmm0
-; X64-WIN-NEXT:    addss %xmm0, %xmm0
-; X64-WIN-NEXT:    retq
 entry:
   %b = uitofp i64 %a to float
   ret float %b
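
The remaining checks verify the uitofp --> sitofp trick the test comment mentions (also visible in the deleted X64-WIN block: shrq, andl $1, orq, cvtsi2ss, addss). A rough C++ rendering of that lowering, for illustration only; it is not the compiler-rt or LLVM source, and the function name is made up.

    #include <cstdint>

    // Sketch of the u64 -> f32 lowering the test checks for: values that fit
    // in a signed 64-bit integer use the signed conversion directly; larger
    // values are halved with the shifted-out bit kept sticky, converted, and
    // then doubled (the addss %xmm0, %xmm0 in the checks).
    float u64_to_f32(uint64_t a) {
      if (static_cast<int64_t>(a) >= 0)
        return static_cast<float>(static_cast<int64_t>(a));
      const uint64_t halved = (a >> 1) | (a & 1);   // halve, keep low bit sticky
      const float f = static_cast<float>(static_cast<int64_t>(halved));
      return f + f;                                 // undo the halving
    }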

More information about the llvm-commits mailing list