[llvm] r288000 - [X86][SSE] Added tests showing missed combines of shuffles to shifts.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 27 10:25:02 PST 2016


Author: rksimon
Date: Sun Nov 27 12:25:02 2016
New Revision: 288000

URL: http://llvm.org/viewvc/llvm-project?rev=288000&view=rev
Log:
[X86][SSE] Added tests showing missed combines of shuffles to shifts.
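
Each of the new tests uses a PSHUFB mask whose effect is a plain bit shift
of wider elements, so the shuffle should ideally lower to a single
immediate shift rather than a pshufb that needs a constant-pool load for
its mask. A minimal sketch of the first pattern follows (the function name
is hypothetical, and the psrlw mentioned in the comments is the hoped-for
codegen, not what is currently emitted):

  ; A pshufb mask index with the top bit set (128) writes a zero byte.
  ; This mask moves byte 1 of each 16-bit word into byte 0 and zeroes
  ; byte 1, which on a little-endian target is a logical right shift of
  ; each word by 8 bits: ideally this would compile to "psrlw $8, %xmm0"
  ; instead of a pshufb.
  define <16 x i8> @pshufb_as_psrlw_sketch(<16 x i8> %a0) {
    %r = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 128, i8 3, i8 128, i8 5, i8 128, i8 7, i8 128, i8 9, i8 128, i8 11, i8 128, i8 13, i8 128, i8 15, i8 128>)
    ret <16 x i8> %r
  }
  declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)

The pslld and psrlq tests below follow the same reasoning: moving byte 0
of each dword to byte 3 and zeroing the low three bytes is pslld $24, and
moving bytes 5-7 of each qword to bytes 0-2 and zeroing the rest is
psrlq $40.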

Modified:
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll?rev=288000&r1=287999&r2=288000&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll Sun Nov 27 12:25:02 2016
@@ -511,6 +511,48 @@ define <32 x i8> @combine_pshufb_as_psrl
   ret <32 x i8> %res0
 }
 
+define <32 x i8> @combine_pshufb_as_psrlw(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_psrlw:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1],zero,ymm0[3],zero,ymm0[5],zero,ymm0[7],zero,ymm0[9],zero,ymm0[11],zero,ymm0[13],zero,ymm0[15],zero,ymm0[17],zero,ymm0[19],zero,ymm0[21],zero,ymm0[23],zero,ymm0[25],zero,ymm0[27],zero,ymm0[29],zero,ymm0[31],zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_psrlw:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1],zero,ymm0[3],zero,ymm0[5],zero,ymm0[7],zero,ymm0[9],zero,ymm0[11],zero,ymm0[13],zero,ymm0[15],zero,ymm0[17],zero,ymm0[19],zero,ymm0[21],zero,ymm0[23],zero,ymm0[25],zero,ymm0[27],zero,ymm0[29],zero,ymm0[31],zero
+; X64-NEXT:    retq
+  %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 1, i8 128, i8 3, i8 128, i8 5, i8 128, i8 7, i8 128, i8 9, i8 128, i8 11, i8 128, i8 13, i8 128, i8 15, i8 128, i8 17, i8 128, i8 19, i8 128, i8 21, i8 128, i8 23, i8 128, i8 25, i8 128, i8 27, i8 128, i8 29, i8 128, i8 31, i8 128>)
+  ret <32 x i8> %res0
+}
+
+define <32 x i8> @combine_pshufb_as_pslld(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_pslld:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[4],zero,zero,zero,ymm0[8],zero,zero,zero,ymm0[12],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[20],zero,zero,zero,ymm0[24],zero,zero,zero,ymm0[28]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_pslld:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm0[0],zero,zero,zero,ymm0[4],zero,zero,zero,ymm0[8],zero,zero,zero,ymm0[12],zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[20],zero,zero,zero,ymm0[24],zero,zero,zero,ymm0[28]
+; X64-NEXT:    retq
+  %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 128, i8 128, i8 128, i8 0, i8 128, i8 128, i8 128, i8 4, i8 128, i8 128, i8 128, i8 8, i8 128, i8 128, i8 128, i8 12, i8 128, i8 128, i8 128, i8 16, i8 128, i8 128, i8 128, i8 20, i8 128, i8 128, i8 128, i8 24, i8 128, i8 128, i8 128, i8 28>)
+  ret <32 x i8> %res0
+}
+
+define <32 x i8> @combine_pshufb_as_psrlq(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_psrlq:
+; X32:       # BB#0:
+; X32-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[5,6,7],zero,zero,zero,zero,zero,ymm0[13,14,15],zero,zero,zero,zero,zero,ymm0[21,22,23],zero,zero,zero,zero,zero,ymm0[29,30,31],zero,zero,zero,zero,zero
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_pshufb_as_psrlq:
+; X64:       # BB#0:
+; X64-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[5,6,7],zero,zero,zero,zero,zero,ymm0[13,14,15],zero,zero,zero,zero,zero,ymm0[21,22,23],zero,zero,zero,zero,zero,ymm0[29,30,31],zero,zero,zero,zero,zero
+; X64-NEXT:    retq
+  %res0 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 21, i8 22, i8 23, i8 128, i8 128, i8 128, i8 128, i8 128, i8 29, i8 30, i8 31, i8 128, i8 128, i8 128, i8 128, i8 128>)
+  ret <32 x i8> %res0
+}
+
 define <32 x i8> @combine_pshufb_as_pshuflw(<32 x i8> %a0) {
 ; X32-LABEL: combine_pshufb_as_pshuflw:
 ; X32:       # BB#0:

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll?rev=288000&r1=287999&r2=288000&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll Sun Nov 27 12:25:02 2016
@@ -296,6 +296,48 @@ define <16 x i8> @combine_pshufb_as_psrl
   ret <16 x i8> %res0
 }
 
+define <16 x i8> @combine_pshufb_as_psrlw(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_psrlw:
+; SSE:       # BB#0:
+; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[1],zero,xmm0[3],zero,xmm0[5],zero,xmm0[7],zero,xmm0[9],zero,xmm0[11],zero,xmm0[13],zero,xmm0[15],zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_pshufb_as_psrlw:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1],zero,xmm0[3],zero,xmm0[5],zero,xmm0[7],zero,xmm0[9],zero,xmm0[11],zero,xmm0[13],zero,xmm0[15],zero
+; AVX-NEXT:    retq
+  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 128, i8 3, i8 128, i8 5, i8 128, i8 7, i8 128, i8 9, i8 128, i8 11, i8 128, i8 13, i8 128, i8 15, i8 128>)
+  ret <16 x i8> %res0
+}
+
+define <16 x i8> @combine_pshufb_as_pslld(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_pslld:
+; SSE:       # BB#0:
+; SSE-NEXT:    pshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[12]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_pshufb_as_pslld:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[12]
+; AVX-NEXT:    retq
+  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 0, i8 128, i8 128, i8 128, i8 4, i8 128, i8 128, i8 128, i8 8, i8 128, i8 128, i8 128, i8 12>)
+  ret <16 x i8> %res0
+}
+
+define <16 x i8> @combine_pshufb_as_psrlq(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_psrlq:
+; SSE:       # BB#0:
+; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[5,6,7],zero,zero,zero,zero,zero,xmm0[13,14,15],zero,zero,zero,zero,zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_pshufb_as_psrlq:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,6,7],zero,zero,zero,zero,zero,xmm0[13,14,15],zero,zero,zero,zero,zero
+; AVX-NEXT:    retq
+  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128>)
+  ret <16 x i8> %res0
+}
+
 define <16 x i8> @combine_pshufb_as_pshuflw(<16 x i8> %a0) {
 ; SSE-LABEL: combine_pshufb_as_pshuflw:
 ; SSE:       # BB#0:
