[llvm] r314777 - [X86][SSE] Add support for shuffle combining from PACKSS/PACKUS

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 3 02:54:03 PDT 2017


Author: rksimon
Date: Tue Oct  3 02:54:03 2017
New Revision: 314777

URL: http://llvm.org/viewvc/llvm-project?rev=314777&view=rev
Log:
[X86][SSE] Add support for shuffle combining from PACKSS/PACKUS

Attempt to combine PACKSS/PACKUS nodes as shuffles by passing them to
combineX86ShufflesRecursively as the root of a shuffle chain. On XOP
targets this lets the mask+pack byte-truncation pattern in
vector-mul.ll fold to a single VPPERM, dropping two VPAND masks per
pattern.

Mentioned in D38472
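
For reference, a minimal IR reproduction of the kind of pattern this
improves (a sketch mirroring the mul_v16i8_17 test below; the exact
test body is an assumption reconstructed from its CHECK lines):

    define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
      ; On XOP this lowers via sign-extend to two <8 x i16> halves,
      ; VPMULLW by 17, then a pack of the 16-bit halves back to bytes.
      %m = mul <16 x i8> %a0, <i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17>
      ret <16 x i8> %m
    }

Previously each widened half was masked with 255 (VPAND) so VPACKUSWB
would not saturate; treating the pack as a shuffle lets the combine see
that the masked pack simply selects the even (low) bytes of each half,
which XOP can do with a single VPPERM.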

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/vector-mul.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=314777&r1=314776&r2=314777&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Oct  3 02:54:03 2017
@@ -31918,6 +31918,10 @@ static SDValue combineVectorPack(SDNode
     return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
   }
 
+  // Attempt to combine as shuffle.
+  SDValue Op(N, 0);
+  combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1,
+                                /*HasVarMask*/ false, DAG, DCI, Subtarget);
   return SDValue();
 }
 

Modified: llvm/trunk/test/CodeGen/X86/vector-mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-mul.ll?rev=314777&r1=314776&r2=314777&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-mul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-mul.ll Tue Oct  3 02:54:03 2017
@@ -358,13 +358,10 @@ define <16 x i8> @mul_v16i8_17(<16 x i8>
 ; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm1
 ; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
 ; X64-XOP-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; X64-XOP-NEXT:    vpand %xmm3, %xmm1, %xmm1
 ; X64-XOP-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm0
 ; X64-XOP-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
-; X64-XOP-NEXT:    vpand %xmm3, %xmm0, %xmm0
-; X64-XOP-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm1[0,2,4,6,8,10,12,14],xmm0[0,2,4,6,8,10,12,14]
 ; X64-XOP-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: mul_v16i8_17:
@@ -493,13 +490,10 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_
 ; X64-XOP:       # BB#0:
 ; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm1
 ; X64-XOP-NEXT:    vpmullw {{.*}}(%rip), %xmm1, %xmm1
-; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; X64-XOP-NEXT:    vpand %xmm2, %xmm1, %xmm1
 ; X64-XOP-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm0
 ; X64-XOP-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
-; X64-XOP-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; X64-XOP-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm1[0,2,4,6,8,10,12,14],xmm0[0,2,4,6,8,10,12,14]
 ; X64-XOP-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
@@ -637,13 +631,10 @@ define <16 x i8> @mul_v16i8_31(<16 x i8>
 ; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm1
 ; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
 ; X64-XOP-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; X64-XOP-NEXT:    vpand %xmm3, %xmm1, %xmm1
 ; X64-XOP-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm0
 ; X64-XOP-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
-; X64-XOP-NEXT:    vpand %xmm3, %xmm0, %xmm0
-; X64-XOP-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm1[0,2,4,6,8,10,12,14],xmm0[0,2,4,6,8,10,12,14]
 ; X64-XOP-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: mul_v16i8_31:
@@ -1005,13 +996,10 @@ define <16 x i8> @mul_v16i8_0_1_3_7_15_3
 ; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm1
 ; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
 ; X64-XOP-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; X64-XOP-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; X64-XOP-NEXT:    vpand %xmm3, %xmm1, %xmm1
 ; X64-XOP-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-XOP-NEXT:    vpmovsxbw %xmm0, %xmm0
 ; X64-XOP-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
-; X64-XOP-NEXT:    vpand %xmm3, %xmm0, %xmm0
-; X64-XOP-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm1[0,2,4,6,8,10,12,14],xmm0[0,2,4,6,8,10,12,14]
 ; X64-XOP-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:



