[llvm] r306978 - [X86][SSE] Attempt to combine 64-bit and 32-bit shuffles to unary shuffles before bit shifts

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Jul 2 07:16:25 PDT 2017


Author: rksimon
Date: Sun Jul  2 07:16:25 2017
New Revision: 306978

URL: http://llvm.org/viewvc/llvm-project?rev=306978&view=rev
Log:
[X86][SSE] Attempt to combine 64-bit and 32-bit shuffles to unary shuffles before bit shifts

We were combining shuffles to bit shifts before trying unary permutes, which meant we couldn't fold loads and the bit shift operated destructively on its destination register.
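
For illustration only (not part of the commit), the same upper-64-bit extraction written with SSE2 intrinsics shows the trade-off: the byte shift works in place and has no load-folding form, while the 32-bit permute takes a separate destination and can fold a memory source. The function names below are hypothetical.

  #include <emmintrin.h>
  #include <cstdint>

  // Byte-shift form: maps to PSRLDQ, which updates its register in place
  // (destructive) and cannot take a memory source operand.
  uint64_t upper_via_shift(__m128i v) {
    return _mm_cvtsi128_si64(_mm_srli_si128(v, 8));
  }

  // Permute form: maps to PSHUFD with immediate 0xEE (elements [2,3,2,3]),
  // which writes a separate destination and can fold a load of the source.
  uint64_t upper_via_permute(__m128i v) {
    return _mm_cvtsi128_si64(_mm_shuffle_epi32(v, 0xEE));
  }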

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/cast-vsel.ll
    llvm/trunk/test/CodeGen/X86/extract-store.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=306978&r1=306977&r2=306978&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Jul  2 07:16:25 2017
@@ -27156,6 +27156,26 @@ static bool matchUnaryPermuteVectorShuff
     }
   }
 
+  // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
+  // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
+  // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
+  if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
+      !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
+    SmallVector<int, 4> RepeatedMask;
+    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
+      // Narrow the repeated mask to create 32-bit element permutes.
+      SmallVector<int, 4> WordMask = RepeatedMask;
+      if (MaskScalarSizeInBits == 64)
+        scaleShuffleMask(2, RepeatedMask, WordMask);
+
+      Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
+      ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
+      ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
+      PermuteImm = getV4X86ShuffleImm(WordMask);
+      return true;
+    }
+  }
+
   // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
   if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
     SmallVector<int, 4> RepeatedMask;
@@ -27201,38 +27221,7 @@ static bool matchUnaryPermuteVectorShuff
     }
   }
 
-  // Ensure we don't contain any zero elements.
-  if (ContainsZeros)
-    return false;
-
-  assert(llvm::all_of(Mask, [&](int M) {
-                        return SM_SentinelUndef <= M && M < (int)NumMaskElts;
-                      }) && "Expected unary shuffle");
-
-  // We only support permutation of 32/64 bit elements after this.
-  if (MaskScalarSizeInBits != 32 && MaskScalarSizeInBits != 64)
-    return false;
-
-  // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
-  // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
-  if ((AllowFloatDomain && !AllowIntDomain) && !Subtarget.hasAVX())
-    return false;
-
-  // We need a repeating shuffle mask for VPERMILPS/PSHUFD.
-  SmallVector<int, 4> RepeatedMask;
-  if (!is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask))
-    return false;
-
-  // Narrow the repeated mask for 32-bit element permutes.
-  SmallVector<int, 4> WordMask = RepeatedMask;
-  if (MaskScalarSizeInBits == 64)
-    scaleShuffleMask(2, RepeatedMask, WordMask);
-
-  Shuffle = (AllowFloatDomain ? X86ISD::VPERMILPI : X86ISD::PSHUFD);
-  ShuffleVT = (AllowFloatDomain ? MVT::f32 : MVT::i32);
-  ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
-  PermuteImm = getV4X86ShuffleImm(WordMask);
-  return true;
+  return false;
 }
 
 // Attempt to match a combined unary shuffle mask against supported binary

Modified: llvm/trunk/test/CodeGen/X86/cast-vsel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/cast-vsel.ll?rev=306978&r1=306977&r2=306978&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/cast-vsel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/cast-vsel.ll Sun Jul  2 07:16:25 2017
@@ -148,7 +148,7 @@ define <4 x double> @fpext(<4 x double>
 ; SSE2-NEXT:    andnps %xmm5, %xmm0
 ; SSE2-NEXT:    orps %xmm4, %xmm0
 ; SSE2-NEXT:    cvtps2pd %xmm0, %xmm2
-; SSE2-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE2-NEXT:    cvtps2pd %xmm0, %xmm1
 ; SSE2-NEXT:    movaps %xmm2, %xmm0
 ; SSE2-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/extract-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extract-store.ll?rev=306978&r1=306977&r2=306978&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extract-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extract-store.ll Sun Jul  2 07:16:25 2017
@@ -345,7 +345,7 @@ define void @extract_i64_1(i64* nocaptur
 ; SSE-X32-LABEL: extract_i64_1:
 ; SSE-X32:       # BB#0:
 ; SSE-X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE-X32-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; SSE-X32-NEXT:    movq %xmm0, (%eax)
 ; SSE-X32-NEXT:    retl
 ;

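For reference, here is a standalone sketch (a simplification of my own, not LLVM code) of the mask narrowing performed by the new hunk in matchUnaryPermuteVectorShuff: a repeated 64-bit-element mask such as {1,1} is scaled by two into the 32-bit-element mask {2,3,2,3}, which packs into the PSHUFD immediate 0xEE and matches the xmm0[2,3,2,3] pattern in the updated tests. The helpers below only mirror the roles of scaleShuffleMask and getV4X86ShuffleImm from the patch.

  #include <cassert>
  #include <cstdint>
  #include <vector>

  // Widen a 64-bit-element shuffle mask to 32-bit elements; each entry M
  // becomes the pair {2*M, 2*M+1} (sentinel/undef entries are ignored here).
  static std::vector<int> scaleMaskBy2(const std::vector<int> &Mask) {
    std::vector<int> Scaled;
    for (int M : Mask) {
      Scaled.push_back(M * 2);
      Scaled.push_back(M * 2 + 1);
    }
    return Scaled;
  }

  // Pack a 4-element 32-bit mask into a PSHUFD/VPERMILPS immediate,
  // two bits per element (simplified; the real code also handles undefs).
  static uint8_t encodeV4Imm(const std::vector<int> &Mask) {
    assert(Mask.size() == 4);
    uint8_t Imm = 0;
    for (int i = 0; i != 4; ++i)
      Imm |= (Mask[i] & 0x3) << (i * 2);
    return Imm;
  }

  int main() {
    std::vector<int> Mask64 = {1, 1}; // duplicate the upper 64-bit element
    assert(encodeV4Imm(scaleMaskBy2(Mask64)) == 0xEE);
    return 0;
  }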
