[llvm] r364454 - [X86][AVX] Add reduced test case for PR41545

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jun 26 10:56:54 PDT 2019


Author: rksimon
Date: Wed Jun 26 10:56:53 2019
New Revision: 364454

URL: http://llvm.org/viewvc/llvm-project?rev=364454&view=rev
Log:
[X86][AVX] Add reduced test case for PR41545

Modified:
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll?rev=364454&r1=364453&r2=364454&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll Wed Jun 26 10:56:53 2019
@@ -2858,3 +2858,79 @@ define <8 x i16> @PR39549(<16 x i8> %x)
   %d = ashr <8 x i16> %c, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   ret <8 x i16> %d
 }
+
+define <4 x i32> @PR41545(<4 x i32> %a0, <16 x i8> %a1) {
+; SSE2-LABEL: PR41545:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    paddd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: PR41545:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    paddd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: PR41545:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0]
+; SSE41-NEXT:    pand %xmm1, %xmm2
+; SSE41-NEXT:    pxor %xmm3, %xmm3
+; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
+; SSE41-NEXT:    psrld $24, %xmm1
+; SSE41-NEXT:    pslld $24, %xmm1
+; SSE41-NEXT:    por %xmm1, %xmm3
+; SSE41-NEXT:    por %xmm2, %xmm3
+; SSE41-NEXT:    paddd %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: PR41545:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $24, %xmm1, %xmm2
+; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
+; AVX1-NEXT:    vpslld $24, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-SLOW-LABEL: PR41545:
+; AVX2-SLOW:       # %bb.0:
+; AVX2-SLOW-NEXT:    vpsrld $24, %xmm1, %xmm2
+; AVX2-SLOW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
+; AVX2-SLOW-NEXT:    vpslld $24, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX2-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX2-SLOW-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX2-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: PR41545:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm2
+; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
+; AVX2-FAST-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; AVX2-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    retq
+  %1  = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %2  = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %3  = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %4  = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %5  = zext <4 x i8> %1 to <4 x i32>
+  %6  = zext <4 x i8> %2 to <4 x i32>
+  %7  = zext <4 x i8> %3 to <4 x i32>
+  %8  = zext <4 x i8> %4 to <4 x i32>
+  %9  = shl <4 x i32> %6, <i32 8, i32 8, i32 8, i32 8>
+  %10 = shl <4 x i32> %7, <i32 16, i32 16, i32 16, i32 16>
+  %11 = shl <4 x i32> %8, <i32 24, i32 24, i32 24, i32 24>
+  %12 = or <4 x i32> %5, %9
+  %13 = or <4 x i32> %12, %10
+  %14 = or <4 x i32> %13, %11
+  %15 = add <4 x i32> %a0, %14
+  ret <4 x i32> %15
+}
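
Note: the reassembly in the test above is byte-exact. Each i32 lane is rebuilt
from its four bytes (low byte from %1, then %2 shifted left by 8, %3 by 16,
%4 by 24), which on a little-endian target such as x86 is simply a bitcast of
%a1. Below is a minimal sketch of the form the whole function should therefore
fold to; the function name @PR41545_folded is hypothetical, used only for
illustration:

  define <4 x i32> @PR41545_folded(<4 x i32> %a0, <16 x i8> %a1) {
    ; On little-endian x86, OR-ing the zero-extended bytes back into
    ; their original positions reproduces the 32-bit lanes unchanged,
    ; i.e. the whole zext/shl/or chain is a bitcast.
    %b = bitcast <16 x i8> %a1 to <4 x i32>
    %r = add <4 x i32> %a0, %b
    ret <4 x i32> %r
  }

The SSE2 and SSSE3 CHECK lines already show this ideal lowering (a single
paddd), while the SSE41 and AVX paths still emit the shift/blend/or sequence
that PR41545 tracks.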

More information about the llvm-commits mailing list