[llvm] r278794 - [X86][SSE] Add support for combining v2f64 target shuffles to VZEXT_MOVL byte rotations

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 16 05:52:07 PDT 2016


Author: rksimon
Date: Tue Aug 16 07:52:06 2016
New Revision: 278794

URL: http://llvm.org/viewvc/llvm-project?rev=278794&view=rev
Log:
[X86][SSE] Add support for combining v2f64 target shuffles to VZEXT_MOVL byte rotations

The combine was only matching v2i64, as it assumed lowering to MOVQ, but we have v2f64 patterns that match in a similar fashion.
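
As an illustration, the mask {0, SM_SentinelZero} corresponds to IR like the sketch below (the function names are just for the example, not taken from the tree). The v2i64 form was already combined to VZEXT_MOVL and selected as MOVQ; with this change the equivalent v2f64 form is matched as well and selected via the existing v2f64 patterns:

    define <2 x i64> @movl_v2i64(<2 x i64> %a) {
      ; Keep element 0, zero element 1 - the {0, SM_SentinelZero} mask.
      %r = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
      ret <2 x i64> %r
    }

    define <2 x double> @movl_v2f64(<2 x double> %a) {
      ; Same mask in the float domain; previously left as a blend with zero.
      %r = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <2 x i32> <i32 0, i32 2>
      ret <2 x double> %r
    }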

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=278794&r1=278793&r2=278794&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Aug 16 07:52:06 2016
@@ -24884,11 +24884,11 @@ static bool matchUnaryVectorShuffle(MVT
   bool FloatDomain = MaskVT.isFloatingPoint() ||
                      (!Subtarget.hasAVX2() && MaskVT.is256BitVector());
 
-  // Match a 128-bit integer vector against a VZEXT_MOVL (MOVQ) instruction.
-  if (!FloatDomain && MaskVT.is128BitVector() &&
+  // Match a 128-bit vector against a VZEXT_MOVL instruction.
+  if (MaskVT.is128BitVector() && Subtarget.hasSSE2() &&
       isTargetShuffleEquivalent(Mask, {0, SM_SentinelZero})) {
     Shuffle = X86ISD::VZEXT_MOVL;
-    ShuffleVT = MVT::v2i64;
+    ShuffleVT = MaskVT;
     return true;
   }
 

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll?rev=278794&r1=278793&r2=278794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll Tue Aug 16 07:52:06 2016
@@ -2837,16 +2837,12 @@ define void @combine_scalar_load_with_bl
 ; SSE41-LABEL: combine_scalar_load_with_blend_with_zero:
 ; SSE41:       # BB#0:
 ; SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE41-NEXT:    xorpd %xmm1, %xmm1
-; SSE41-NEXT:    blendpd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; SSE41-NEXT:    movapd %xmm1, (%rsi)
+; SSE41-NEXT:    movapd %xmm0, (%rsi)
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_scalar_load_with_blend_with_zero:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
 ; AVX-NEXT:    vmovapd %xmm0, (%rsi)
 ; AVX-NEXT:    retq
   %1 = load double, double* %a0, align 8
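
For reference, a reduced sketch of the kind of IR the test above exercises (the signature, value names and <2 x double> store are assumptions for illustration, not the verbatim test body):

    define void @scalar_load_blend_with_zero(double* %a0, <2 x double>* %a1) {
      %ld  = load double, double* %a0, align 8
      ; Insert the scalar into lane 0, then blend lane 1 with zero.
      %ins = insertelement <2 x double> undef, double %ld, i32 0
      %blz = shufflevector <2 x double> %ins, <2 x double> zeroinitializer, <2 x i32> <i32 0, i32 2>
      store <2 x double> %blz, <2 x double>* %a1, align 16
      ret void
    }

The (v)movsd load already zeroes the upper lane, so once the shuffle combines to VZEXT_MOVL the explicit xor/blend-with-zero sequence in the old checks is no longer needed, leaving just the scalar load and the store.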
