[llvm] r369870 - [X86] Add test case for inserting/extracting from shuffled vectors. NFC

Amaury Sechet via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 25 08:19:20 PDT 2019


Author: deadalnix
Date: Sun Aug 25 08:19:20 2019
New Revision: 369870

URL: http://llvm.org/viewvc/llvm-project?rev=369870&view=rev
Log:
[X86] Add test case for inserting/extracting from shuffled vectors. NFC

Modified:
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll?rev=369870&r1=369869&r2=369870&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll Sun Aug 25 08:19:20 2019
@@ -2819,3 +2819,83 @@ define <4 x i32> @PR41545(<4 x i32> %a0,
   %15 = add <4 x i32> %a0, %14
   ret <4 x i32> %15
 }
+
+define <8 x i16> @shuffle_extract_insert(<8 x i16> %a) {
+; SSE2-LABEL: shuffle_extract_insert:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pextrw $3, %xmm0, %eax
+; SSE2-NEXT:    pextrw $4, %xmm0, %r8d
+; SSE2-NEXT:    pextrw $5, %xmm0, %edx
+; SSE2-NEXT:    pextrw $6, %xmm0, %esi
+; SSE2-NEXT:    movd %xmm0, %edi
+; SSE2-NEXT:    pextrw $7, %xmm0, %ecx
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; SSE2-NEXT:    pinsrw $2, %edi, %xmm0
+; SSE2-NEXT:    pinsrw $3, %eax, %xmm0
+; SSE2-NEXT:    pinsrw $4, %esi, %xmm0
+; SSE2-NEXT:    pinsrw $5, %edx, %xmm0
+; SSE2-NEXT:    pinsrw $6, %r8d, %xmm0
+; SSE2-NEXT:    pinsrw $7, %ecx, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: shuffle_extract_insert:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pextrw $3, %xmm0, %eax
+; SSSE3-NEXT:    pextrw $4, %xmm0, %r8d
+; SSSE3-NEXT:    pextrw $5, %xmm0, %edx
+; SSSE3-NEXT:    pextrw $6, %xmm0, %esi
+; SSSE3-NEXT:    movd %xmm0, %edi
+; SSSE3-NEXT:    pextrw $7, %xmm0, %ecx
+; SSSE3-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
+; SSSE3-NEXT:    pinsrw $2, %edi, %xmm0
+; SSSE3-NEXT:    pinsrw $3, %eax, %xmm0
+; SSSE3-NEXT:    pinsrw $4, %esi, %xmm0
+; SSSE3-NEXT:    pinsrw $5, %edx, %xmm0
+; SSSE3-NEXT:    pinsrw $6, %r8d, %xmm0
+; SSSE3-NEXT:    pinsrw $7, %ecx, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: shuffle_extract_insert:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pextrw $4, %xmm0, %eax
+; SSE41-NEXT:    pextrw $6, %xmm0, %ecx
+; SSE41-NEXT:    movd %xmm0, %edx
+; SSE41-NEXT:    pshuflw {{.*#+}} xmm1 = xmm0[2,1,2,3,4,5,6,7]
+; SSE41-NEXT:    pinsrw $2, %edx, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
+; SSE41-NEXT:    pinsrw $4, %ecx, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
+; SSE41-NEXT:    pinsrw $6, %eax, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: shuffle_extract_insert:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX-NEXT:    vpextrw $6, %xmm0, %ecx
+; AVX-NEXT:    vmovd %xmm0, %edx
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[2,1,2,3,4,5,6,7]
+; AVX-NEXT:    vpinsrw $2, %edx, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
+; AVX-NEXT:    vpinsrw $4, %ecx, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
+; AVX-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
+; AVX-NEXT:    retq
+  %a0 = extractelement <8 x i16> %a, i32 0
+  %a1 = extractelement <8 x i16> %a, i32 1
+  %a3 = extractelement <8 x i16> %a, i32 3
+  %a4 = extractelement <8 x i16> %a, i32 4
+  %a5 = extractelement <8 x i16> %a, i32 5
+  %a6 = extractelement <8 x i16> %a, i32 6
+  %a7 = extractelement <8 x i16> %a, i32 7
+  %1 = shufflevector <8 x i16> %a, <8 x i16> %a, <8 x i32> <i32 2, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %2 = insertelement <8 x i16> %1, i16 %a1, i32 1
+  %3 = insertelement <8 x i16> %2, i16 %a0, i32 2
+  %4 = insertelement <8 x i16> %3, i16 %a3, i32 3
+  %5 = insertelement <8 x i16> %4, i16 %a6, i32 4
+  %6 = insertelement <8 x i16> %5, i16 %a5, i32 5
+  %7 = insertelement <8 x i16> %6, i16 %a4, i32 6
+  %8 = insertelement <8 x i16> %7, i16 %a7, i32 7
+  ret <8 x i16> %8
+}
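
For reference, the extract/insert chain in the new @shuffle_extract_insert test rebuilds the source vector with lanes 0 and 2 swapped and lanes 4 and 6 swapped (lanes 1, 3, 5 and 7 stay in place), so the whole sequence is equivalent to a single shufflevector. A minimal IR sketch of that equivalent form is below; it is illustrative only, not part of this commit, and the function name is made up. The checked SSE2/SSSE3 output above instead goes through scalar pextrw/pinsrw, which is the codegen behavior the test pins down.

; Equivalent single-shuffle form of the pattern the test exercises: the mask
; <2,1,0,3,6,5,4,7> swaps lanes 0<->2 and 4<->6 and keeps the other lanes in place.
define <8 x i16> @shuffle_extract_insert_equivalent(<8 x i16> %a) {
  %s = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 2, i32 1, i32 0, i32 3, i32 6, i32 5, i32 4, i32 7>
  ret <8 x i16> %s
}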