[llvm] 5883f6f - [X86] Add AVX tests buildvec-insertvec.ll

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Jun 27 02:43:47 PDT 2020


Author: Simon Pilgrim
Date: 2020-06-27T10:43:11+01:00
New Revision: 5883f6f977a9b90913451e3e3dc13e14d7cddaac

URL: https://github.com/llvm/llvm-project/commit/5883f6f977a9b90913451e3e3dc13e14d7cddaac
DIFF: https://github.com/llvm/llvm-project/commit/5883f6f977a9b90913451e3e3dc13e14d7cddaac.diff

LOG: [X86] Add AVX tests buildvec-insertvec.ll

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/buildvec-insertvec.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/buildvec-insertvec.ll b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
index 9fb78491b608..381c9bf3f203 100644
--- a/llvm/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
 
 define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind {
 ; SSE2-LABEL: foo:
@@ -26,6 +28,15 @@ define void @foo(<3 x float> %in, <4 x i8>* nocapture %out) nounwind {
 ; SSE41-NEXT:    pinsrb $3, %eax, %xmm0
 ; SSE41-NEXT:    movd %xmm0, (%rdi)
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: foo:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT:    movl $255, %eax
+; AVX-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vmovd %xmm0, (%rdi)
+; AVX-NEXT:    retq
   %t0 = fptoui <3 x float> %in to <3 x i8>
   %t1 = shufflevector <3 x i8> %t0, <3 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
   %t2 = insertelement <4 x i8> %t1, i8 -1, i32 3
@@ -52,6 +63,11 @@ define <4 x float> @test_negative_zero_1(<4 x float> %A) {
 ; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2],zero
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_negative_zero_1:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2],zero
+; AVX-NEXT:    retq
 entry:
   %0 = extractelement <4 x float> %A, i32 0
   %1 = insertelement <4 x float> undef, float %0, i32 0
@@ -74,6 +90,11 @@ define <2 x double> @test_negative_zero_2(<2 x double> %A) {
 ; SSE41:       # %bb.0: # %entry
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_negative_zero_2:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+; AVX-NEXT:    retq
 entry:
   %0 = extractelement <2 x double> %A, i32 0
   %1 = insertelement <2 x double> undef, double %0, i32 0
@@ -95,6 +116,13 @@ define <4 x float> @test_buildvector_v4f32_register(float %f0, float %f1, float
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v4f32_register:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; AVX-NEXT:    retq
   %ins0 = insertelement <4 x float> undef, float %f0, i32 0
   %ins1 = insertelement <4 x float> %ins0, float %f1, i32 1
   %ins2 = insertelement <4 x float> %ins1, float %f2, i32 2
@@ -121,6 +149,14 @@ define <4 x float> @test_buildvector_v4f32_load(float* %p0, float* %p1, float* %
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v4f32_load:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; AVX-NEXT:    retq
   %f0 = load float, float* %p0, align 4
   %f1 = load float, float* %p1, align 4
   %f2 = load float, float* %p2, align 4
@@ -147,6 +183,13 @@ define <4 x float> @test_buildvector_v4f32_partial_load(float %f0, float %f1, fl
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v4f32_partial_load:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; AVX-NEXT:    retq
   %f3 = load float, float* %p3, align 4
   %ins0 = insertelement <4 x float> undef, float %f0, i32 0
   %ins1 = insertelement <4 x float> %ins0, float %f1, i32 1
@@ -174,6 +217,14 @@ define <4 x i32> @test_buildvector_v4i32_register(i32 %a0, i32 %a1, i32 %a2, i32
 ; SSE41-NEXT:    pinsrd $2, %edx, %xmm0
 ; SSE41-NEXT:    pinsrd $3, %ecx, %xmm0
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v4i32_register:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %edi, %xmm0
+; AVX-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0 = insertelement <4 x i32> undef, i32 %a0, i32 0
   %ins1 = insertelement <4 x i32> %ins0, i32 %a1, i32 1
   %ins2 = insertelement <4 x i32> %ins1, i32 %a2, i32 2
@@ -195,6 +246,12 @@ define <4 x i32> @test_buildvector_v4i32_partial(i32 %a0, i32 %a3) {
 ; SSE41-NEXT:    movd %edi, %xmm0
 ; SSE41-NEXT:    pinsrd $3, %esi, %xmm0
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v4i32_partial:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %edi, %xmm0
+; AVX-NEXT:    vpinsrd $3, %esi, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0 = insertelement <4 x i32> undef, i32   %a0, i32 0
   %ins1 = insertelement <4 x i32> %ins0, i32 undef, i32 1
   %ins2 = insertelement <4 x i32> %ins1, i32 undef, i32 2
@@ -203,14 +260,23 @@ define <4 x i32> @test_buildvector_v4i32_partial(i32 %a0, i32 %a3) {
 }
 
 define <4 x i32> @test_buildvector_v4i32_register_zero(i32 %a0, i32 %a2, i32 %a3) {
-; CHECK-LABEL: test_buildvector_v4i32_register_zero:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movd %edx, %xmm0
-; CHECK-NEXT:    movd %esi, %xmm1
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT:    movd %edi, %xmm0
-; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test_buildvector_v4i32_register_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %edx, %xmm0
+; SSE-NEXT:    movd %esi, %xmm1
+; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT:    movd %edi, %xmm0
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v4i32_register_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %edx, %xmm0
+; AVX-NEXT:    vmovd %esi, %xmm1
+; AVX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX-NEXT:    vmovd %edi, %xmm1
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT:    retq
   %ins0 = insertelement <4 x i32> undef, i32 %a0, i32 0
   %ins1 = insertelement <4 x i32> %ins0, i32   0, i32 1
   %ins2 = insertelement <4 x i32> %ins1, i32 %a2, i32 2
@@ -219,14 +285,23 @@ define <4 x i32> @test_buildvector_v4i32_register_zero(i32 %a0, i32 %a2, i32 %a3
 }
 
 define <4 x i32> @test_buildvector_v4i32_register_zero_2(i32 %a1, i32 %a2, i32 %a3) {
-; CHECK-LABEL: test_buildvector_v4i32_register_zero_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movd %edx, %xmm0
-; CHECK-NEXT:    movd %esi, %xmm1
-; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT:    movd %edi, %xmm0
-; CHECK-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
-; CHECK-NEXT:    retq
+; SSE-LABEL: test_buildvector_v4i32_register_zero_2:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %edx, %xmm0
+; SSE-NEXT:    movd %esi, %xmm1
+; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT:    movd %edi, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v4i32_register_zero_2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %edx, %xmm0
+; AVX-NEXT:    vmovd %esi, %xmm1
+; AVX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX-NEXT:    vmovd %edi, %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[1,0],xmm0[0,1]
+; AVX-NEXT:    retq
   %ins0 = insertelement <4 x i32> undef, i32   0, i32 0
   %ins1 = insertelement <4 x i32> %ins0, i32 %a1, i32 1
   %ins2 = insertelement <4 x i32> %ins1, i32 %a2, i32 2
@@ -265,6 +340,18 @@ define <8 x i16> @test_buildvector_v8i16_register(i16 %a0, i16 %a1, i16 %a2, i16
 ; SSE41-NEXT:    pinsrw $6, {{[0-9]+}}(%rsp), %xmm0
 ; SSE41-NEXT:    pinsrw $7, {{[0-9]+}}(%rsp), %xmm0
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v8i16_register:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %edi, %xmm0
+; AVX-NEXT:    vpinsrw $1, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $2, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $4, %r8d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $5, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0
   %ins1 = insertelement <8 x i16> %ins0, i16 %a1, i32 1
   %ins2 = insertelement <8 x i16> %ins1, i16 %a2, i32 2
@@ -277,14 +364,23 @@ define <8 x i16> @test_buildvector_v8i16_register(i16 %a0, i16 %a1, i16 %a2, i16
 }
 
 define <8 x i16> @test_buildvector_v8i16_partial(i16 %a1, i16 %a3, i16 %a4, i16 %a5) {
-; CHECK-LABEL: test_buildvector_v8i16_partial:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pxor %xmm0, %xmm0
-; CHECK-NEXT:    pinsrw $1, %edi, %xmm0
-; CHECK-NEXT:    pinsrw $3, %esi, %xmm0
-; CHECK-NEXT:    pinsrw $4, %edx, %xmm0
-; CHECK-NEXT:    pinsrw $5, %ecx, %xmm0
-; CHECK-NEXT:    retq
+; SSE-LABEL: test_buildvector_v8i16_partial:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm0, %xmm0
+; SSE-NEXT:    pinsrw $1, %edi, %xmm0
+; SSE-NEXT:    pinsrw $3, %esi, %xmm0
+; SSE-NEXT:    pinsrw $4, %edx, %xmm0
+; SSE-NEXT:    pinsrw $5, %ecx, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v8i16_partial:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $3, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $4, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0 = insertelement <8 x i16> undef, i16 undef, i32 0
   %ins1 = insertelement <8 x i16> %ins0, i16   %a1, i32 1
   %ins2 = insertelement <8 x i16> %ins1, i16 undef, i32 2
@@ -297,14 +393,23 @@ define <8 x i16> @test_buildvector_v8i16_partial(i16 %a1, i16 %a3, i16 %a4, i16
 }
 
 define <8 x i16> @test_buildvector_v8i16_register_zero(i16 %a0, i16 %a3, i16 %a4, i16 %a5) {
-; CHECK-LABEL: test_buildvector_v8i16_register_zero:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movzwl %di, %eax
-; CHECK-NEXT:    movd %eax, %xmm0
-; CHECK-NEXT:    pinsrw $3, %esi, %xmm0
-; CHECK-NEXT:    pinsrw $4, %edx, %xmm0
-; CHECK-NEXT:    pinsrw $5, %ecx, %xmm0
-; CHECK-NEXT:    retq
+; SSE-LABEL: test_buildvector_v8i16_register_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movzwl %di, %eax
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    pinsrw $3, %esi, %xmm0
+; SSE-NEXT:    pinsrw $4, %edx, %xmm0
+; SSE-NEXT:    pinsrw $5, %ecx, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v8i16_register_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movzwl %di, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpinsrw $3, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $4, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0 = insertelement <8 x i16> undef, i16   %a0, i32 0
   %ins1 = insertelement <8 x i16> %ins0, i16     0, i32 1
   %ins2 = insertelement <8 x i16> %ins1, i16     0, i32 2
@@ -317,14 +422,23 @@ define <8 x i16> @test_buildvector_v8i16_register_zero(i16 %a0, i16 %a3, i16 %a4
 }
 
 define <8 x i16> @test_buildvector_v8i16_register_zero_2(i16 %a1, i16 %a3, i16 %a4, i16 %a5) {
-; CHECK-LABEL: test_buildvector_v8i16_register_zero_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pxor %xmm0, %xmm0
-; CHECK-NEXT:    pinsrw $1, %edi, %xmm0
-; CHECK-NEXT:    pinsrw $3, %esi, %xmm0
-; CHECK-NEXT:    pinsrw $4, %edx, %xmm0
-; CHECK-NEXT:    pinsrw $5, %ecx, %xmm0
-; CHECK-NEXT:    retq
+; SSE-LABEL: test_buildvector_v8i16_register_zero_2:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm0, %xmm0
+; SSE-NEXT:    pinsrw $1, %edi, %xmm0
+; SSE-NEXT:    pinsrw $3, %esi, %xmm0
+; SSE-NEXT:    pinsrw $4, %edx, %xmm0
+; SSE-NEXT:    pinsrw $5, %ecx, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v8i16_register_zero_2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $3, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $4, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrw $5, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0 = insertelement <8 x i16> undef, i16     0, i32 0
   %ins1 = insertelement <8 x i16> %ins0, i16   %a1, i32 1
   %ins2 = insertelement <8 x i16> %ins1, i16     0, i32 2
@@ -391,6 +505,26 @@ define <16 x i8> @test_buildvector_v16i8_register(i8 %a0, i8 %a1, i8 %a2, i8 %a3
 ; SSE41-NEXT:    pinsrb $14, {{[0-9]+}}(%rsp), %xmm0
 ; SSE41-NEXT:    pinsrb $15, {{[0-9]+}}(%rsp), %xmm0
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v16i8_register:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %edi, %xmm0
+; AVX-NEXT:    vpinsrb $1, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $4, %r8d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $5, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0  = insertelement <16 x i8> undef,  i8 %a0,  i32 0
   %ins1  = insertelement <16 x i8> %ins0,  i8 %a1,  i32 1
   %ins2  = insertelement <16 x i8> %ins1,  i8 %a2,  i32 2
@@ -434,6 +568,17 @@ define <16 x i8> @test_buildvector_v16i8_partial(i8 %a2, i8 %a6, i8 %a8, i8 %a11
 ; SSE41-NEXT:    pinsrb $12, %r8d, %xmm0
 ; SSE41-NEXT:    pinsrb $15, %r9d, %xmm0
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v16i8_partial:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $2, %edi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $6, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $8, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $12, %r8d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $15, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0  = insertelement <16 x i8> undef,  i8 undef, i32 0
   %ins1  = insertelement <16 x i8> %ins0,  i8 undef, i32 1
   %ins2  = insertelement <16 x i8> %ins1,  i8   %a2, i32 2
@@ -484,6 +629,18 @@ define <16 x i8> @test_buildvector_v16i8_register_zero(i8 %a0, i8 %a4, i8 %a6, i
 ; SSE41-NEXT:    pinsrb $12, %r9d, %xmm0
 ; SSE41-NEXT:    pinsrb $15, {{[0-9]+}}(%rsp), %xmm0
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v16i8_register_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movzbl %dil, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpinsrb $4, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $6, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $11, %r8d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $12, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0  = insertelement <16 x i8> undef,  i8   %a0, i32 0
   %ins1  = insertelement <16 x i8> %ins0,  i8     0, i32 1
   %ins2  = insertelement <16 x i8> %ins1,  i8     0, i32 2
@@ -535,6 +692,18 @@ define <16 x i8> @test_buildvector_v16i8_register_zero_2(i8 %a2, i8 %a3, i8 %a6,
 ; SSE41-NEXT:    pinsrb $12, %r9d, %xmm0
 ; SSE41-NEXT:    pinsrb $15, {{[0-9]+}}(%rsp), %xmm0
 ; SSE41-NEXT:    retq
+;
+; AVX-LABEL: test_buildvector_v16i8_register_zero_2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $2, %edi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $3, %esi, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $6, %edx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $11, %r8d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $12, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ins0  = insertelement <16 x i8> undef,  i8     0, i32 0
   %ins1  = insertelement <16 x i8> %ins0,  i8     0, i32 1
   %ins2  = insertelement <16 x i8> %ins1,  i8   %a2, i32 2


        


More information about the llvm-commits mailing list