[llvm] af1accd - [X86] Teach computeKnownBitsForTargetNode that the upper half of X86ISD::MOVQ2DQ is all zero.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat May 30 20:04:32 PDT 2020


Author: Craig Topper
Date: 2020-05-30T19:47:07-07:00
New Revision: af1accdd860d4e1768a1f56a8651ae4d13445e14

URL: https://github.com/llvm/llvm-project/commit/af1accdd860d4e1768a1f56a8651ae4d13445e14
DIFF: https://github.com/llvm/llvm-project/commit/af1accdd860d4e1768a1f56a8651ae4d13445e14.diff

LOG: [X86] Teach computeKnownBitsForTargetNode that the upper half of X86ISD::MOVQ2DQ is all zero.

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/mmx-cvt.ll
    llvm/test/CodeGen/X86/vec_insert-7.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6ebd46893f95..fa1b194afc1a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33402,6 +33402,12 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     }
     break;
   }
+  case X86ISD::MOVQ2DQ: {
+    // Move from MMX to XMM. Upper half of XMM should be 0.
+    if (DemandedElts.countTrailingZeros() >= (NumElts / 2))
+      Known.setAllZero();
+    break;
+  }
   }
 
   // Handle target shuffles.

diff --git a/llvm/test/CodeGen/X86/mmx-cvt.ll b/llvm/test/CodeGen/X86/mmx-cvt.ll
index 339df30892a6..803b3d936720 100644
--- a/llvm/test/CodeGen/X86/mmx-cvt.ll
+++ b/llvm/test/CodeGen/X86/mmx-cvt.ll
@@ -298,7 +298,6 @@ define <4 x float> @sitofp_v2i32_v2f32(<1 x i64>*) nounwind {
 ; X86-NEXT:    movq (%eax), %mm0
 ; X86-NEXT:    paddd %mm0, %mm0
 ; X86-NEXT:    movq2dq %mm0, %xmm0
-; X86-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; X86-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X86-NEXT:    retl
 ;
@@ -307,7 +306,6 @@ define <4 x float> @sitofp_v2i32_v2f32(<1 x i64>*) nounwind {
 ; X64-NEXT:    movq (%rdi), %mm0
 ; X64-NEXT:    paddd %mm0, %mm0
 ; X64-NEXT:    movq2dq %mm0, %xmm0
-; X64-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
 ; X64-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %2 = bitcast <1 x i64>* %0 to x86_mmx*

diff --git a/llvm/test/CodeGen/X86/vec_insert-7.ll b/llvm/test/CodeGen/X86/vec_insert-7.ll
index 52d6e7ca7a2c..e4b9806ab7e6 100644
--- a/llvm/test/CodeGen/X86/vec_insert-7.ll
+++ b/llvm/test/CodeGen/X86/vec_insert-7.ll
@@ -8,12 +8,7 @@
 define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
 ; X32-LABEL: mmx_movzl:
 ; X32:       ## %bb.0:
-; X32-NEXT:    movq2dq %mm0, %xmm0
-; X32-NEXT:    movl $32, %eax
-; X32-NEXT:    pinsrd $0, %eax, %xmm0
-; X32-NEXT:    pxor %xmm1, %xmm1
-; X32-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; X32-NEXT:    movdq2q %xmm1, %mm0
+; X32-NEXT:    movq LCPI0_0, %mm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: mmx_movzl:


        


More information about the llvm-commits mailing list