[llvm] 30fcd29 - [X86][SSE] lowerShuffleWithSHUFPS - commute '2*V1+2*V2 elements' mask if it allows a load fold

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 24 04:04:26 PST 2020


Author: Simon Pilgrim
Date: 2020-01-24T12:04:10Z
New Revision: 30fcd29fe47968427e3c6b26709282331c1aed77

URL: https://github.com/llvm/llvm-project/commit/30fcd29fe47968427e3c6b26709282331c1aed77
DIFF: https://github.com/llvm/llvm-project/commit/30fcd29fe47968427e3c6b26709282331c1aed77.diff

LOG: [X86][SSE] lowerShuffleWithSHUFPS - commute '2*V1+2*V2 elements' mask if it allows a load fold

As mentioned on D73023.
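
For context, commuting a shuffle mask swaps which source each lane reads from: indices below the element count move to the second operand and indices at or above it move to the first. A minimal standalone sketch of that remapping for a 4-wide shuffle, mirroring the effect of ShuffleVectorSDNode::commuteMask (the helper and driver below are illustrative, not from the tree):

  #include <array>
  #include <cstdio>

  // Commute a 4-element shuffle mask: indices 0..3 (first operand)
  // become 4..7 (second operand) and vice versa; -1 (undef) is kept.
  static std::array<int, 4> commuteMask4(std::array<int, 4> Mask) {
    for (int &M : Mask) {
      if (M < 0)
        continue; // undef lane stays undef
      M = M < 4 ? M + 4 : M - 4;
    }
    return Mask;
  }

  int main() {
    // The mask implied by the updated test's name, <0,6,2,4>,
    // commutes to <4,2,6,0>.
    std::array<int, 4> C = commuteMask4({0, 6, 2, 4});
    std::printf("%d %d %d %d\n", C[0], C[1], C[2], C[3]); // 4 2 6 0
  }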

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d8c7a51a6890..f2e36cdb3437 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -13316,10 +13316,11 @@ static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
 /// It makes no assumptions about whether this is the *best* lowering, it simply
 /// uses it.
 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
-                                      ArrayRef<int> Mask, SDValue V1,
+                                      ArrayRef<int> OriginalMask, SDValue V1,
                                       SDValue V2, SelectionDAG &DAG) {
   SDValue LowV = V1, HighV = V2;
-  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
+  SmallVector<int, 4> Mask(OriginalMask.begin(), OriginalMask.end());
+  SmallVector<int, 4> NewMask = Mask;
 
   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
 
@@ -13357,6 +13358,14 @@ static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
       NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
     }
   } else if (NumV2Elements == 2) {
+    // If we are likely to fold V1 but not V2, then commute the shuffle.
+    if (MayFoldLoad(V1) && !MayFoldLoad(V2)) {
+      ShuffleVectorSDNode::commuteMask(Mask);
+      NewMask = Mask;
+      std::swap(V1, V2);
+      std::swap(LowV, HighV);
+    }
+
     if (Mask[0] < 4 && Mask[1] < 4) {
       // Handle the easy case where we have V1 in the low lanes and V2 in the
       // high lanes.
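
The commute is sound because a shuffle is symmetric under swapping its operands together with its mask: shuffle(V1, V2, M) == shuffle(V2, V1, commute(M)). A small self-checking sketch with a reference shuffle (the reference implementation and names are illustrative, not LLVM code):

  #include <array>
  #include <cassert>

  using Vec4 = std::array<float, 4>;
  using Mask4 = std::array<int, 4>;

  // Reference 4-wide shuffle: lane i takes element M[i] from the
  // 8-element concatenation {V1[0..3], V2[0..3]}.
  static Vec4 shuffle(const Vec4 &V1, const Vec4 &V2, const Mask4 &M) {
    Vec4 R{};
    for (int i = 0; i != 4; ++i)
      R[i] = M[i] < 4 ? V1[M[i]] : V2[M[i] - 4];
    return R;
  }

  int main() {
    Vec4 A = {0, 1, 2, 3}, B = {4, 5, 6, 7};
    Mask4 M = {0, 6, 2, 4}; // mask from the test below
    Mask4 C = {4, 2, 6, 0}; // its commuted form
    assert(shuffle(A, B, M) == shuffle(B, A, C));
  }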

diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
index 0462caf23fee..119d199ff665 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -2471,17 +2471,14 @@ define <4 x float> @shuffle_mem_v4f32_4523(<4 x float> %a, <4 x float>* %pb) {
 define  <4 x float> @shuffle_mem_v4f32_0624(<4 x float> %a0, <4 x float>* %a1) {
 ; SSE-LABEL: shuffle_mem_v4f32_0624:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps (%rdi), %xmm1
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,0]
-; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0],mem[0,2]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[2,0,3,1]
 ; SSE-NEXT:    retq
 ;
 ; AVX1OR2-LABEL: shuffle_mem_v4f32_0624:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovaps (%rdi), %xmm1
-; AVX1OR2-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[2,0]
-; AVX1OR2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1OR2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[2,0],mem[0,2]
+; AVX1OR2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_mem_v4f32_0624:
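
The payoff in this test comes from SHUFPS only being able to fold a load into its second (right-hand) source: once the commute puts the loaded vector there, the separate movaps disappears. A hedged intrinsics sketch of the fold-friendly shape (the function is hypothetical; whether the fold actually fires depends on the compiler, alignment, and register pressure):

  #include <xmmintrin.h>

  // With the memory operand on the right, this can compile to a single
  // "shufps mem" matching the first shuffle in the new codegen:
  // xmm0 = xmm0[2,0],mem[0,2]. Illustrative only.
  __m128 fold_friendly(__m128 a, const __m128 *p) {
    return _mm_shuffle_ps(a, *p, _MM_SHUFFLE(2, 0, 0, 2));
  }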

