[llvm] 5f3bf59 - [X86] Fix f128->i16 fptosi to promote the i16 to i32 before trying to form a libcall.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Wed Nov 20 17:10:12 PST 2019


Author: Craig Topper
Date: 2019-11-20T17:09:31-08:00
New Revision: 5f3bf5967b8d07dd170f0f8a2e085e0c26f7c710

URL: https://github.com/llvm/llvm-project/commit/5f3bf5967b8d07dd170f0f8a2e085e0c26f7c710
DIFF: https://github.com/llvm/llvm-project/commit/5f3bf5967b8d07dd170f0f8a2e085e0c26f7c710.diff

LOG: [X86] Fix f128->i16 fptosi to promote the i16 to i32 before trying to form a libcall.

Previously, one of the test cases added here gave an error: there is no f128->i16 conversion libcall, so the i16 result must be promoted to i32 before the libcall is formed, letting the conversion go through __fixtfsi and truncate the i32 result back to i16.
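
For reference, a minimal reproducer in the same shape as the new test cases
(the function name is illustrative, not from the commit):

    ; illustrative reproducer; mirrors the new fp128-cast.ll tests below
    define i16 @fptosi_f128_i16(fp128 %x) nounwind {
    entry:
      %conv = fptosi fp128 %x to i16
      ret i16 %conv
    }

With this change it selects successfully: the i16 result is promoted to i32,
the f128->i32 conversion lowers to a call to __fixtfsi, and the low 16 bits of
the i32 result are stored, as the CHECK lines below show.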

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/fp128-cast.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 4482b023e2c6..c7c8f667decc 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -19550,19 +19550,6 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
   MVT SrcVT = Src.getSimpleValueType();
   SDLoc dl(Op);
 
-  if (SrcVT == MVT::f128) {
-    RTLIB::Libcall LC;
-    if (Op.getOpcode() == ISD::FP_TO_SINT)
-      LC = RTLIB::getFPTOSINT(SrcVT, VT);
-    else
-      LC = RTLIB::getFPTOUINT(SrcVT, VT);
-
-    // FIXME: Strict fp!
-    assert(!IsStrict && "Unhandled strict operation!");
-    MakeLibCallOptions CallOptions;
-    return makeLibCall(DAG, LC, VT, Src, CallOptions, SDLoc(Op)).first;
-  }
-
   if (VT.isVector()) {
     if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
       MVT ResVT = MVT::v4i32;
@@ -19636,10 +19623,10 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
       return SDValue();
   }
 
-  // Promote i16 to i32 if we can use a SSE operation.
+  // Promote i16 to i32 if we can use a SSE operation or the type is f128.
   // FIXME: This does not generate an invalid exception if the input does not
   // fit in i16. PR44019
-  if (VT == MVT::i16 && UseSSEReg) {
+  if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
     assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
     SDValue Res, Chain;
     if (IsStrict) {
@@ -19659,6 +19646,20 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
   if (UseSSEReg && IsSigned)
     return Op;
 
+  // fp128 needs to use a libcall.
+  if (SrcVT == MVT::f128) {
+    RTLIB::Libcall LC;
+    if (Op.getOpcode() == ISD::FP_TO_SINT)
+      LC = RTLIB::getFPTOSINT(SrcVT, VT);
+    else
+      LC = RTLIB::getFPTOUINT(SrcVT, VT);
+
+    // FIXME: Strict fp!
+    assert(!IsStrict && "Unhandled strict operation!");
+    MakeLibCallOptions CallOptions;
+    return makeLibCall(DAG, LC, VT, Src, CallOptions, SDLoc(Op)).first;
+  }
+
   // Fall back to X87.
   SDValue Chain;
   if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {

diff --git a/llvm/test/CodeGen/X86/fp128-cast.ll b/llvm/test/CodeGen/X86/fp128-cast.ll
index 3089add42298..00d97cbb2ece 100644
--- a/llvm/test/CodeGen/X86/fp128-cast.ll
+++ b/llvm/test/CodeGen/X86/fp128-cast.ll
@@ -9,6 +9,7 @@
 
 ; Check soft floating point conversion function calls.
 
+@vi16 = common global i16 0, align 2
 @vi32 = common global i32 0, align 4
 @vi64 = common global i64 0, align 8
 @vi128 = common global i128 0, align 16
@@ -163,6 +164,82 @@ entry:
   ret void
 }
 
+define void @TestFPToSIF128_I16() nounwind {
+; X64-SSE-LABEL: TestFPToSIF128_I16:
+; X64-SSE:       # %bb.0: # %entry
+; X64-SSE-NEXT:    pushq %rax
+; X64-SSE-NEXT:    movaps {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __fixtfsi
+; X64-SSE-NEXT:    movw %ax, {{.*}}(%rip)
+; X64-SSE-NEXT:    popq %rax
+; X64-SSE-NEXT:    retq
+;
+; X32-LABEL: TestFPToSIF128_I16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl vf128+12
+; X32-NEXT:    pushl vf128+8
+; X32-NEXT:    pushl vf128+4
+; X32-NEXT:    pushl vf128
+; X32-NEXT:    calll __fixtfsi
+; X32-NEXT:    addl $16, %esp
+; X32-NEXT:    movw %ax, vi16
+; X32-NEXT:    addl $12, %esp
+; X32-NEXT:    retl
+;
+; X64-AVX-LABEL: TestFPToSIF128_I16:
+; X64-AVX:       # %bb.0: # %entry
+; X64-AVX-NEXT:    pushq %rax
+; X64-AVX-NEXT:    vmovaps {{.*}}(%rip), %xmm0
+; X64-AVX-NEXT:    callq __fixtfsi
+; X64-AVX-NEXT:    movw %ax, {{.*}}(%rip)
+; X64-AVX-NEXT:    popq %rax
+; X64-AVX-NEXT:    retq
+entry:
+  %0 = load fp128, fp128* @vf128, align 16
+  %conv = fptosi fp128 %0 to i16
+  store i16 %conv, i16* @vi16, align 2
+  ret void
+}
+
+define void @TestFPToUIF128_I16() nounwind {
+; X64-SSE-LABEL: TestFPToUIF128_I16:
+; X64-SSE:       # %bb.0: # %entry
+; X64-SSE-NEXT:    pushq %rax
+; X64-SSE-NEXT:    movaps {{.*}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __fixtfsi
+; X64-SSE-NEXT:    movw %ax, {{.*}}(%rip)
+; X64-SSE-NEXT:    popq %rax
+; X64-SSE-NEXT:    retq
+;
+; X32-LABEL: TestFPToUIF128_I16:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl vf128+12
+; X32-NEXT:    pushl vf128+8
+; X32-NEXT:    pushl vf128+4
+; X32-NEXT:    pushl vf128
+; X32-NEXT:    calll __fixunstfsi
+; X32-NEXT:    addl $16, %esp
+; X32-NEXT:    movw %ax, vi16
+; X32-NEXT:    addl $12, %esp
+; X32-NEXT:    retl
+;
+; X64-AVX-LABEL: TestFPToUIF128_I16:
+; X64-AVX:       # %bb.0: # %entry
+; X64-AVX-NEXT:    pushq %rax
+; X64-AVX-NEXT:    vmovaps {{.*}}(%rip), %xmm0
+; X64-AVX-NEXT:    callq __fixtfsi
+; X64-AVX-NEXT:    movw %ax, {{.*}}(%rip)
+; X64-AVX-NEXT:    popq %rax
+; X64-AVX-NEXT:    retq
+entry:
+  %0 = load fp128, fp128* @vf128, align 16
+  %conv = fptoui fp128 %0 to i16
+  store i16 %conv, i16* @vi16, align 2
+  ret void
+}
+
 define void @TestFPToSIF128_I32() nounwind {
 ; X64-SSE-LABEL: TestFPToSIF128_I32:
 ; X64-SSE:       # %bb.0: # %entry
@@ -1082,7 +1159,7 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
 ; X64-SSE-LABEL: TestTruncCopysign:
 ; X64-SSE:       # %bb.0: # %entry
 ; X64-SSE-NEXT:    cmpl $50001, %edi # imm = 0xC351
-; X64-SSE-NEXT:    jl .LBB22_2
+; X64-SSE-NEXT:    jl .LBB24_2
 ; X64-SSE-NEXT:  # %bb.1: # %if.then
 ; X64-SSE-NEXT:    pushq %rax
 ; X64-SSE-NEXT:    callq __trunctfdf2
@@ -1091,7 +1168,7 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
 ; X64-SSE-NEXT:    orps %xmm1, %xmm0
 ; X64-SSE-NEXT:    callq __extenddftf2
 ; X64-SSE-NEXT:    addq $8, %rsp
-; X64-SSE-NEXT:  .LBB22_2: # %cleanup
+; X64-SSE-NEXT:  .LBB24_2: # %cleanup
 ; X64-SSE-NEXT:    retq
 ;
 ; X32-LABEL: TestTruncCopysign:
@@ -1105,7 +1182,7 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    cmpl $50001, {{[0-9]+}}(%esp) # imm = 0xC351
-; X32-NEXT:    jl .LBB22_4
+; X32-NEXT:    jl .LBB24_4
 ; X32-NEXT:  # %bb.1: # %if.then
 ; X32-NEXT:    pushl %eax
 ; X32-NEXT:    pushl %ecx
@@ -1117,11 +1194,11 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
 ; X32-NEXT:    testb $-128, {{[0-9]+}}(%esp)
 ; X32-NEXT:    flds {{\.LCPI.*}}
 ; X32-NEXT:    flds {{\.LCPI.*}}
-; X32-NEXT:    jne .LBB22_3
+; X32-NEXT:    jne .LBB24_3
 ; X32-NEXT:  # %bb.2: # %if.then
 ; X32-NEXT:    fstp %st(1)
 ; X32-NEXT:    fldz
-; X32-NEXT:  .LBB22_3: # %if.then
+; X32-NEXT:  .LBB24_3: # %if.then
 ; X32-NEXT:    fstp %st(0)
 ; X32-NEXT:    subl $16, %esp
 ; X32-NEXT:    leal {{[0-9]+}}(%esp), %eax
@@ -1133,7 +1210,7 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:  .LBB22_4: # %cleanup
+; X32-NEXT:  .LBB24_4: # %cleanup
 ; X32-NEXT:    movl %edx, (%esi)
 ; X32-NEXT:    movl %edi, 4(%esi)
 ; X32-NEXT:    movl %ecx, 8(%esi)
@@ -1147,7 +1224,7 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
 ; X64-AVX-LABEL: TestTruncCopysign:
 ; X64-AVX:       # %bb.0: # %entry
 ; X64-AVX-NEXT:    cmpl $50001, %edi # imm = 0xC351
-; X64-AVX-NEXT:    jl .LBB22_2
+; X64-AVX-NEXT:    jl .LBB24_2
 ; X64-AVX-NEXT:  # %bb.1: # %if.then
 ; X64-AVX-NEXT:    pushq %rax
 ; X64-AVX-NEXT:    callq __trunctfdf2
@@ -1157,7 +1234,7 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
 ; X64-AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
 ; X64-AVX-NEXT:    callq __extenddftf2
 ; X64-AVX-NEXT:    addq $8, %rsp
-; X64-AVX-NEXT:  .LBB22_2: # %cleanup
+; X64-AVX-NEXT:  .LBB24_2: # %cleanup
 ; X64-AVX-NEXT:    retq
 entry:
   %cmp = icmp sgt i32 %n, 50000




More information about the llvm-commits mailing list