[llvm] r259410 - [X86][SSE] Add AVX512 merge consecutive load tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 1 13:30:50 PST 2016


Author: rksimon
Date: Mon Feb  1 15:30:50 2016
New Revision: 259410

URL: http://llvm.org/viewvc/llvm-project?rev=259410&view=rev
Log:
[X86][SSE] Add AVX512 merge consecutive load tests

Add AVX512F/AVX512BW 512-bit tests.

Add AVX512F tests to existing 128/256-bit tests.

Added:
    llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll
Modified:
    llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll
    llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll?rev=259410&r1=259409&r2=259410&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-128.ll Mon Feb  1 15:30:50 2016
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512F
 
 define <2 x double> @merge_2f64_f64_23(double* %ptr) nounwind uwtable noinline ssp {
 ; SSE-LABEL: merge_2f64_f64_23:
@@ -73,10 +74,20 @@ define <4 x float> @merge_4f32_f32_3zuu(
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: merge_4f32_f32_3zuu:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_4f32_f32_3zuu:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_4f32_f32_3zuu:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_4f32_f32_3zuu:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovss 12(%rdi), %xmm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 3
   %val0 = load float, float* %ptr0
   %res0 = insertelement <4 x float> undef, float %val0, i32 0
@@ -111,12 +122,26 @@ define <4 x float> @merge_4f32_f32_34z6(
 ; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,0]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: merge_4f32_f32_34z6:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,0]
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_4f32_f32_34z6:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,0]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_4f32_f32_34z6:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,0]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_4f32_f32_34z6:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vmovss 24(%rdi), %xmm1
+; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,0]
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 3
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 4
   %ptr3 = getelementptr inbounds float, float* %ptr, i64 6
@@ -163,11 +188,23 @@ define <4 x float> @merge_4f32_f32_012u(
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: merge_4f32_f32_012u:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_4f32_f32_012u:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_4f32_f32_012u:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_4f32_f32_012u:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vinsertps $32, 8(%rdi), %xmm0, %xmm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 0
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 1
   %ptr2 = getelementptr inbounds float, float* %ptr, i64 2
@@ -197,11 +234,23 @@ define <4 x float> @merge_4f32_f32_019u(
 ; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: merge_4f32_f32_019u:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_4f32_f32_019u:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_4f32_f32_019u:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_4f32_f32_019u:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vinsertps $32, 36(%rdi), %xmm0, %xmm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 0
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 1
   %ptr2 = getelementptr inbounds float, float* %ptr, i64 9
@@ -243,10 +292,20 @@ define <4 x i32> @merge_4i32_i32_3zuu(i3
 ; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: merge_4i32_i32_3zuu:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_4i32_i32_3zuu:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_4i32_i32_3zuu:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_4i32_i32_3zuu:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovd 12(%rdi), %xmm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 3
   %val0 = load i32, i32* %ptr0
   %res0 = insertelement <4 x i32> undef, i32 %val0, i32 0

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll?rev=259410&r1=259409&r2=259410&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-256.ll Mon Feb  1 15:30:50 2016
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512F
 
 define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_2f64_23:
@@ -49,10 +50,20 @@ define <4 x double> @merge_4f64_f64_2345
 }
 
 define <4 x double> @merge_4f64_f64_3zuu(double* %ptr) nounwind uwtable noinline ssp {
-; AVX-LABEL: merge_4f64_f64_3zuu:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_4f64_f64_3zuu:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_4f64_f64_3zuu:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_4f64_f64_3zuu:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovsd 24(%rdi), %xmm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
   %val0 = load double, double* %ptr0
   %res0 = insertelement <4 x double> undef, double %val0, i32 0
@@ -106,6 +117,14 @@ define <4 x double> @merge_4f64_f64_34z6
 ; AVX2-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_4f64_f64_34z6:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovdqu 24(%rdi), %xmm0
+; AVX512F-NEXT:    vmovsd 48(%rdi), %xmm1
+; AVX512F-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 4
   %ptr3 = getelementptr inbounds double, double* %ptr, i64 6
@@ -153,10 +172,20 @@ define <4 x i64> @merge_4i64_i64_1234(i6
 }
 
 define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
-; AVX-LABEL: merge_4i64_i64_1zzu:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_4i64_i64_1zzu:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_4i64_i64_1zzu:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_4i64_i64_1zzu:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq 8(%rdi), %xmm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
   %val0 = load i64, i64* %ptr0
   %res0 = insertelement <4 x i64> undef, i64 %val0, i32 0
@@ -182,13 +211,29 @@ define <4 x i64> @merge_4i64_i64_23zz(i6
 }
 
 define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noinline ssp {
-; AVX-LABEL: merge_8f32_2f32_23z5:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovupd 16(%rdi), %xmm0
-; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vmovhpd 40(%rdi), %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_8f32_2f32_23z5:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovupd 16(%rdi), %xmm0
+; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vmovhpd 40(%rdi), %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_8f32_2f32_23z5:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovupd 16(%rdi), %xmm0
+; AVX2-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vmovhpd 40(%rdi), %xmm1, %xmm1
+; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_8f32_2f32_23z5:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovups 16(%rdi), %xmm0
+; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovhpd 40(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 2
   %ptr1 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
   %ptr3 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 5
@@ -227,6 +272,13 @@ define <8 x float> @merge_8f32_f32_12zzu
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_8f32_f32_12zzuuzz:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 1
   %ptr1 = getelementptr inbounds float, float* %ptr, i64 2
   %val0 = load float, float* %ptr0
@@ -241,15 +293,35 @@ define <8 x float> @merge_8f32_f32_12zzu
 }
 
 define <8 x float> @merge_8f32_f32_1u3u5zu8(float* %ptr) nounwind uwtable noinline ssp {
-; AVX-LABEL: merge_8f32_f32_1u3u5zu8:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[0,1],xmm0[1,0]
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: merge_8f32_f32_1u3u5zu8:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[0,1],xmm0[1,0]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: merge_8f32_f32_1u3u5zu8:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm2[0,1],xmm0[1,0]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_8f32_f32_1u3u5zu8:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovss 32(%rdi), %xmm0
+; AVX512F-NEXT:    vmovss 20(%rdi), %xmm1
+; AVX512F-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[1,0]
+; AVX512F-NEXT:    vmovss 4(%rdi), %xmm1
+; AVX512F-NEXT:    vinsertps $32, 12(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds float, float* %ptr, i64 1
   %ptr2 = getelementptr inbounds float, float* %ptr, i64 3
   %ptr4 = getelementptr inbounds float, float* %ptr, i64 5
@@ -292,6 +364,13 @@ define <8 x i32> @merge_8i32_i32_56zz9uz
 ; AVX2-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_8i32_i32_56zz9uzz:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vmovd 36(%rdi), %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 5
   %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 6
   %ptr4 = getelementptr inbounds i32, i32* %ptr, i64 9
@@ -330,6 +409,17 @@ define <8 x i32> @merge_8i32_i32_1u3u5zu
 ; AVX2-NEXT:    vpinsrd $2, 12(%rdi), %xmm1, %xmm1
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_8i32_i32_1u3u5zu8:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovd 20(%rdi), %xmm0
+; AVX512F-NEXT:    vmovd 32(%rdi), %xmm1
+; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,0]
+; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512F-NEXT:    vmovd 4(%rdi), %xmm1
+; AVX512F-NEXT:    vpinsrd $2, 12(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
   %ptr2 = getelementptr inbounds i32, i32* %ptr, i64 3
   %ptr4 = getelementptr inbounds i32, i32* %ptr, i64 5
@@ -362,6 +452,14 @@ define <16 x i16> @merge_16i16_i16_89zzz
 ; AVX2-NEXT:    vpinsrw $1, 18(%rdi), %xmm1, %xmm1
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT:    vpinsrw $0, 16(%rdi), %xmm0, %xmm1
+; AVX512F-NEXT:    vpinsrw $1, 18(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 8
   %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 9
   %val0 = load i16, i16* %ptr0
@@ -435,6 +533,16 @@ define <16 x i16> @merge_16i16_i16_0uu3z
 ; AVX2-NEXT:    vpinsrw $7, 30(%rdi), %xmm1, %xmm1
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpinsrw $4, 24(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vpinsrw $6, 28(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vpinsrw $7, 30(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
   %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
   %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
   %ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
@@ -479,23 +587,32 @@ define <32 x i8> @merge_32i8_i8_23u5uuuu
 ; AVX1-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $0, 4(%rdi), %xmm0, %xmm1
-; AVX1-NEXT:    vpinsrb $1, 5(%rdi), %xmm1, %xmm1
-; AVX1-NEXT:    vpinsrb $3, 7(%rdi), %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrb $0, 2(%rdi), %xmm0, %xmm1
+; AVX1-NEXT:    vpinsrb $1, 3(%rdi), %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrb $3, 5(%rdi), %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
 ; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $0, 4(%rdi), %xmm0, %xmm1
-; AVX2-NEXT:    vpinsrb $1, 5(%rdi), %xmm1, %xmm1
-; AVX2-NEXT:    vpinsrb $3, 7(%rdi), %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrb $0, 2(%rdi), %xmm0, %xmm1
+; AVX2-NEXT:    vpinsrb $1, 3(%rdi), %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrb $3, 5(%rdi), %xmm1, %xmm1
 ; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
-  %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 4
-  %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 5
-  %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 7
+;
+; AVX512F-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT:    vpinsrb $0, 2(%rdi), %xmm0, %xmm1
+; AVX512F-NEXT:    vpinsrb $1, 3(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vpinsrb $3, 5(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+  %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 2
+  %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 3
+  %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 5
   %val0 = load i8, i8* %ptr0
   %val1 = load i8, i8* %ptr1
   %val3 = load i8, i8* %ptr3

Added: llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll?rev=259410&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll (added)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-loads-512.ll Mon Feb  1 15:30:50 2016
@@ -0,0 +1,526 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
+
+define <8 x double> @merge_8f64_2f64_12u4(<2 x double>* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8f64_2f64_12u4:
+; ALL:       # BB#0:
+; ALL-NEXT:    vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
+; ALL-NEXT:    vinsertf64x4 $0, 16(%rdi), %zmm0, %zmm1
+; ALL-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 1
+  %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
+  %ptr3 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 4
+  %val0 = load <2 x double>, <2 x double>* %ptr0
+  %val1 = load <2 x double>, <2 x double>* %ptr1
+  %val3 = load <2 x double>, <2 x double>* %ptr3
+  %res01 = shufflevector <2 x double> %val0, <2 x double> %val1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res23 = shufflevector <2 x double> undef, <2 x double> %val3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = shufflevector <4 x double> %res01, <4 x double> %res23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %res
+}
+
+define <8 x double> @merge_8f64_2f64_23z5(<2 x double>* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8f64_2f64_23z5:
+; ALL:       # BB#0:
+; ALL-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
+; ALL-NEXT:    vinsertf128 $1, 80(%rdi), %ymm0, %ymm0
+; ALL-NEXT:    vinsertf64x4 $0, 32(%rdi), %zmm0, %zmm1
+; ALL-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
+  %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 3
+  %ptr3 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 5
+  %val0 = load <2 x double>, <2 x double>* %ptr0
+  %val1 = load <2 x double>, <2 x double>* %ptr1
+  %val3 = load <2 x double>, <2 x double>* %ptr3
+  %res01 = shufflevector <2 x double> %val0, <2 x double> %val1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res23 = shufflevector <2 x double> zeroinitializer, <2 x double> %val3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = shufflevector <4 x double> %res01, <4 x double> %res23, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %res
+}
+
+define <8 x double> @merge_8f64_4f64_z2(<4 x double>* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8f64_4f64_z2:
+; ALL:       # BB#0:
+; ALL-NEXT:    vxorpd %ymm0, %ymm0, %ymm0
+; ALL-NEXT:    vinsertf64x4 $1, 64(%rdi), %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %ptr1 = getelementptr inbounds <4 x double>, <4 x double>* %ptr, i64 2
+  %val1 = load <4 x double>, <4 x double>* %ptr1
+  %res = shufflevector <4 x double> zeroinitializer, <4 x double> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %res
+}
+
+define <8 x double> @merge_8f64_f64_23uuuuu9(double* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8f64_f64_23uuuuu9:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovupd 16(%rdi), %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds double, double* %ptr, i64 2
+  %ptr1 = getelementptr inbounds double, double* %ptr, i64 3
+  %ptr7 = getelementptr inbounds double, double* %ptr, i64 9
+  %val0 = load double, double* %ptr0
+  %val1 = load double, double* %ptr1
+  %val7 = load double, double* %ptr7
+  %res0 = insertelement <8 x double> undef, double %val0, i32 0
+  %res1 = insertelement <8 x double> %res0, double %val1, i32 1
+  %res7 = insertelement <8 x double> %res1, double %val7, i32 7
+  ret <8 x double> %res7
+}
+
+define <8 x double> @merge_8f64_f64_12zzuuzz(double* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8f64_f64_12zzuuzz:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovupd 8(%rdi), %xmm0
+; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds double, double* %ptr, i64 1
+  %ptr1 = getelementptr inbounds double, double* %ptr, i64 2
+  %val0 = load double, double* %ptr0
+  %val1 = load double, double* %ptr1
+  %res0 = insertelement <8 x double> undef, double %val0, i32 0
+  %res1 = insertelement <8 x double> %res0, double %val1, i32 1
+  %res2 = insertelement <8 x double> %res1, double   0.0, i32 2
+  %res3 = insertelement <8 x double> %res2, double   0.0, i32 3
+  %res6 = insertelement <8 x double> %res3, double   0.0, i32 6
+  %res7 = insertelement <8 x double> %res6, double   0.0, i32 7
+  ret <8 x double> %res7
+}
+
+define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8f64_f64_1u3u5zu8:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovsd 40(%rdi), %xmm0
+; ALL-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vmovsd 8(%rdi), %xmm1
+; ALL-NEXT:    vmovsd 24(%rdi), %xmm2
+; ALL-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds double, double* %ptr, i64 1
+  %ptr2 = getelementptr inbounds double, double* %ptr, i64 3
+  %ptr4 = getelementptr inbounds double, double* %ptr, i64 5
+  %ptr7 = getelementptr inbounds double, double* %ptr, i64 8
+  %val0 = load double, double* %ptr0
+  %val2 = load double, double* %ptr2
+  %val4 = load double, double* %ptr4
+  %val7 = load double, double* %ptr7
+  %res0 = insertelement <8 x double> undef, double %val0, i32 0
+  %res2 = insertelement <8 x double> %res0, double %val2, i32 2
+  %res4 = insertelement <8 x double> %res2, double %val4, i32 4
+  %res5 = insertelement <8 x double> %res4, double   0.0, i32 5
+  %res7 = insertelement <8 x double> %res5, double %val7, i32 7
+  ret <8 x double> %res7
+}
+
+define <8 x i64> @merge_8i64_4i64_z3(<4 x i64>* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8i64_4i64_z3:
+; ALL:       # BB#0:
+; ALL-NEXT:    vpxor %ymm0, %ymm0, %ymm0
+; ALL-NEXT:    vinserti64x4 $1, 96(%rdi), %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %ptr1 = getelementptr inbounds <4 x i64>, <4 x i64>* %ptr, i64 3
+  %val1 = load <4 x i64>, <4 x i64>* %ptr1
+  %res = shufflevector <4 x i64> zeroinitializer, <4 x i64> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8i64_i64_56zz9uzz:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovdqu 40(%rdi), %xmm0
+; ALL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; ALL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vmovq 72(%rdi), %xmm1
+; ALL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 5
+  %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 6
+  %ptr4 = getelementptr inbounds i64, i64* %ptr, i64 9
+  %val0 = load i64, i64* %ptr0
+  %val1 = load i64, i64* %ptr1
+  %val4 = load i64, i64* %ptr4
+  %res0 = insertelement <8 x i64> undef, i64 %val0, i32 0
+  %res1 = insertelement <8 x i64> %res0, i64 %val1, i32 1
+  %res2 = insertelement <8 x i64> %res1, i64     0, i32 2
+  %res3 = insertelement <8 x i64> %res2, i64     0, i32 3
+  %res4 = insertelement <8 x i64> %res3, i64 %val4, i32 4
+  %res6 = insertelement <8 x i64> %res4, i64     0, i32 6
+  %res7 = insertelement <8 x i64> %res6, i64     0, i32 7
+  ret <8 x i64> %res7
+}
+
+define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_8i64_i64_1u3u5zu8:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq 40(%rdi), %xmm0
+; ALL-NEXT:    vpbroadcastq 64(%rdi), %xmm1
+; ALL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vmovq 8(%rdi), %xmm1
+; ALL-NEXT:    vmovq 24(%rdi), %xmm2
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
+  %ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
+  %ptr4 = getelementptr inbounds i64, i64* %ptr, i64 5
+  %ptr7 = getelementptr inbounds i64, i64* %ptr, i64 8
+  %val0 = load i64, i64* %ptr0
+  %val2 = load i64, i64* %ptr2
+  %val4 = load i64, i64* %ptr4
+  %val7 = load i64, i64* %ptr7
+  %res0 = insertelement <8 x i64> undef, i64 %val0, i32 0
+  %res2 = insertelement <8 x i64> %res0, i64 %val2, i32 2
+  %res4 = insertelement <8 x i64> %res2, i64 %val4, i32 4
+  %res5 = insertelement <8 x i64> %res4, i64     0, i32 5
+  %res7 = insertelement <8 x i64> %res5, i64 %val7, i32 7
+  ret <8 x i64> %res7
+}
+
+define <16 x float> @merge_16f32_f32_89zzzuuuuuuuuuuuz(float* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds float, float* %ptr, i64 8
+  %ptr1 = getelementptr inbounds float, float* %ptr, i64 9
+  %val0 = load float, float* %ptr0
+  %val1 = load float, float* %ptr1
+  %res0 = insertelement <16 x float> undef, float %val0, i32 0
+  %res1 = insertelement <16 x float> %res0, float %val1, i32 1
+  %res2 = insertelement <16 x float> %res1, float   0.0, i32 2
+  %res3 = insertelement <16 x float> %res2, float   0.0, i32 3
+  %res4 = insertelement <16 x float> %res3, float   0.0, i32 4
+  %resF = insertelement <16 x float> %res4, float   0.0, i32 15
+  ret <16 x float> %resF
+}
+
+define <16 x float> @merge_16f32_f32_45u7uuuuuuuuuuuu(float* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovups 16(%rdi), %xmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds float, float* %ptr, i64 4
+  %ptr1 = getelementptr inbounds float, float* %ptr, i64 5
+  %ptr3 = getelementptr inbounds float, float* %ptr, i64 7
+  %val0 = load float, float* %ptr0
+  %val1 = load float, float* %ptr1
+  %val3 = load float, float* %ptr3
+  %res0 = insertelement <16 x float> undef, float %val0, i32 0
+  %res1 = insertelement <16 x float> %res0, float %val1, i32 1
+  %res3 = insertelement <16 x float> %res1, float %val3, i32 3
+  ret <16 x float> %res3
+}
+
+define <16 x float> @merge_16f32_f32_0uu3uuuuuuuuCuEF(float* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovups (%rdi), %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds float, float* %ptr, i64 0
+  %ptr3 = getelementptr inbounds float, float* %ptr, i64 3
+  %ptrC = getelementptr inbounds float, float* %ptr, i64 12
+  %ptrE = getelementptr inbounds float, float* %ptr, i64 14
+  %ptrF = getelementptr inbounds float, float* %ptr, i64 15
+  %val0 = load float, float* %ptr0
+  %val3 = load float, float* %ptr3
+  %valC = load float, float* %ptrC
+  %valE = load float, float* %ptrE
+  %valF = load float, float* %ptrF
+  %res0 = insertelement <16 x float> undef, float %val0, i32 0
+  %res3 = insertelement <16 x float> %res0, float %val3, i32 3
+  %resC = insertelement <16 x float> %res3, float %valC, i32 12
+  %resE = insertelement <16 x float> %resC, float %valE, i32 14
+  %resF = insertelement <16 x float> %resE, float %valF, i32 15
+  ret <16 x float> %resF
+}
+
+; Same loads as merge_16f32_f32_0uu3uuuuuuuuCuEF but with lanes 4, 5 and
+; 13 (D) explicitly zeroed.  The memory at those offsets is not known to be
+; zero, so a full 512-bit load would be incorrect; codegen at this revision
+; assembles the vector piecewise (low half = 128-bit load with zeroed upper
+; lanes, high half built from scalar loads and shuffles).
+define <16 x float> @merge_16f32_f32_0uu3zzuuuuuzCuEF(float* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT:    vmovss 48(%rdi), %xmm1
+; ALL-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; ALL-NEXT:    vmovupd (%rdi), %xmm1
+; ALL-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; ALL-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds float, float* %ptr, i64 0
+  %ptr3 = getelementptr inbounds float, float* %ptr, i64 3
+  %ptrC = getelementptr inbounds float, float* %ptr, i64 12
+  %ptrE = getelementptr inbounds float, float* %ptr, i64 14
+  %ptrF = getelementptr inbounds float, float* %ptr, i64 15
+  %val0 = load float, float* %ptr0
+  %val3 = load float, float* %ptr3
+  %valC = load float, float* %ptrC
+  %valE = load float, float* %ptrE
+  %valF = load float, float* %ptrF
+  %res0 = insertelement <16 x float> undef, float %val0, i32 0
+  %res3 = insertelement <16 x float> %res0, float %val3, i32 3
+  %res4 = insertelement <16 x float> %res3, float   0.0, i32 4
+  %res5 = insertelement <16 x float> %res4, float   0.0, i32 5
+  %resC = insertelement <16 x float> %res5, float %valC, i32 12
+  %resD = insertelement <16 x float> %resC, float   0.0, i32 13
+  %resE = insertelement <16 x float> %resD, float %valE, i32 14
+  %resF = insertelement <16 x float> %resE, float %valF, i32 15
+  ret <16 x float> %resF
+}
+
+; Consecutive i32 loads from elements 1 and 2 merge into a single 64-bit
+; load (vmovq); the explicitly zeroed lanes are materialized with pxor and
+; inserted to clear the upper portions of the zmm result.
+define <16 x i32> @merge_16i32_i32_12zzzuuuuuuuuuuuz(i32* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; ALL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; ALL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
+  %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 2
+  %val0 = load i32, i32* %ptr0
+  %val1 = load i32, i32* %ptr1
+  %res0 = insertelement <16 x i32> undef, i32 %val0, i32 0
+  %res1 = insertelement <16 x i32> %res0, i32 %val1, i32 1
+  %res2 = insertelement <16 x i32> %res1, i32     0, i32 2
+  %res3 = insertelement <16 x i32> %res2, i32     0, i32 3
+  %res4 = insertelement <16 x i32> %res3, i32     0, i32 4
+  %resF = insertelement <16 x i32> %res4, i32     0, i32 15
+  ret <16 x i32> %resF
+}
+
+; Loads of elements 2, 3 and 5 (result lane 2 undef) merge into one
+; unaligned 128-bit load at offset 8 — the undef lane may be filled from
+; memory.  The remaining lanes are undef, so only an xmm load is emitted.
+define <16 x i32> @merge_16i32_i32_23u5uuuuuuuuuuuu(i32* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovups 8(%rdi), %xmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 2
+  %ptr1 = getelementptr inbounds i32, i32* %ptr, i64 3
+  %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 5
+  %val0 = load i32, i32* %ptr0
+  %val1 = load i32, i32* %ptr1
+  %val3 = load i32, i32* %ptr3
+  %res0 = insertelement <16 x i32> undef, i32 %val0, i32 0
+  %res1 = insertelement <16 x i32> %res0, i32 %val1, i32 1
+  %res3 = insertelement <16 x i32> %res1, i32 %val3, i32 3
+  ret <16 x i32> %res3
+}
+
+; Integer analogue of merge_16f32_f32_0uu3uuuuuuuuCuEF: scattered i32 loads
+; from lanes 0, 3, 12, 14 and 15 with all other lanes undef should merge
+; into a single unaligned 512-bit load (vmovdqu32 zmm).
+define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF(i32* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovdqu32 (%rdi), %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
+  %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
+  %ptrC = getelementptr inbounds i32, i32* %ptr, i64 12
+  %ptrE = getelementptr inbounds i32, i32* %ptr, i64 14
+  %ptrF = getelementptr inbounds i32, i32* %ptr, i64 15
+  %val0 = load i32, i32* %ptr0
+  %val3 = load i32, i32* %ptr3
+  %valC = load i32, i32* %ptrC
+  %valE = load i32, i32* %ptrE
+  %valF = load i32, i32* %ptrF
+  %res0 = insertelement <16 x i32> undef, i32 %val0, i32 0
+  %res3 = insertelement <16 x i32> %res0, i32 %val3, i32 3
+  %resC = insertelement <16 x i32> %res3, i32 %valC, i32 12
+  %resE = insertelement <16 x i32> %resC, i32 %valE, i32 14
+  %resF = insertelement <16 x i32> %resE, i32 %valF, i32 15
+  ret <16 x i32> %resF
+}
+
+; Integer analogue of merge_16f32_f32_0uu3zzuuuuuzCuEF: the explicitly
+; zeroed lanes (4, 5 and 13) cannot be filled from memory, which blocks the
+; single 512-bit load; the vector is assembled piecewise instead.
+define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT:    vmovd 48(%rdi), %xmm1
+; ALL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; ALL-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; ALL-NEXT:    vmovdqu (%rdi), %xmm1
+; ALL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
+  %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
+  %ptrC = getelementptr inbounds i32, i32* %ptr, i64 12
+  %ptrE = getelementptr inbounds i32, i32* %ptr, i64 14
+  %ptrF = getelementptr inbounds i32, i32* %ptr, i64 15
+  %val0 = load i32, i32* %ptr0
+  %val3 = load i32, i32* %ptr3
+  %valC = load i32, i32* %ptrC
+  %valE = load i32, i32* %ptrE
+  %valF = load i32, i32* %ptrF
+  %res0 = insertelement <16 x i32> undef, i32 %val0, i32 0
+  %res3 = insertelement <16 x i32> %res0, i32 %val3, i32 3
+  %res4 = insertelement <16 x i32> %res3, i32     0, i32 4
+  %res5 = insertelement <16 x i32> %res4, i32     0, i32 5
+  %resC = insertelement <16 x i32> %res5, i32 %valC, i32 12
+  %resD = insertelement <16 x i32> %resC, i32     0, i32 13
+  %resE = insertelement <16 x i32> %resD, i32 %valE, i32 14
+  %resF = insertelement <16 x i32> %resE, i32 %valF, i32 15
+  ret <16 x i32> %resF
+}
+
+; i16 loads from elements 1, 2 and 4 (result lane 2 undef) merge into a
+; single 64-bit load; lanes 30/31 are explicitly zeroed.  The AVX512F run
+; zeroes a second ymm and returns — the <32 x i16> is carried in two ymm
+; halves (AVX512F lacks 512-bit i16 operations) — while AVX512BW inserts
+; the zero half into a full zmm.
+define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) nounwind uwtable noinline ssp {
+; AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512BW-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 1
+  %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 2
+  %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 4
+  %val0 = load i16, i16* %ptr0
+  %val1 = load i16, i16* %ptr1
+  %val3 = load i16, i16* %ptr3
+  %res0 = insertelement <32 x i16> undef, i16 %val0, i16 0
+  %res1 = insertelement <32 x i16> %res0, i16 %val1, i16 1
+  %res3 = insertelement <32 x i16> %res1, i16 %val3, i16 3
+  %res30 = insertelement <32 x i16> %res3, i16 0, i16 30
+  %res31 = insertelement <32 x i16> %res30, i16 0, i16 31
+  ret <32 x i16> %res31
+}
+
+; i16 loads from elements 4, 5 and 7 (result lane 2 undef) merge into a
+; single 64-bit load; all remaining lanes are undef, so both run lines
+; share the same expected output (common ALL prefix).
+define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
+; ALL-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
+; ALL:       # BB#0:
+; ALL-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; ALL-NEXT:    retq
+  %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
+  %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
+  %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 7
+  %val0 = load i16, i16* %ptr0
+  %val1 = load i16, i16* %ptr1
+  %val3 = load i16, i16* %ptr3
+  %res0 = insertelement <32 x i16> undef, i16 %val0, i16 0
+  %res1 = insertelement <32 x i16> %res0, i16 %val1, i16 1
+  %res3 = insertelement <32 x i16> %res1, i16 %val3, i16 3
+  ret <32 x i16> %res3
+}
+
+; Only elements 2 and 3 are loaded, with several lanes explicitly zeroed.
+; At this revision the low xmm is built with vpxor + two scalar vpinsrw
+; inserts.  NOTE(review): the two adjacent i16 loads could in principle be
+; merged into one 32-bit load — not yet done here.
+define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
+; AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT:    vpinsrw $0, 4(%rdi), %xmm0, %xmm1
+; AVX512F-NEXT:    vpinsrw $1, 6(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpinsrw $0, 4(%rdi), %xmm0, %xmm1
+; AVX512BW-NEXT:    vpinsrw $1, 6(%rdi), %xmm1, %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 2
+  %ptr1 = getelementptr inbounds i16, i16* %ptr, i64 3
+  %val0 = load i16, i16* %ptr0
+  %val1 = load i16, i16* %ptr1
+  %res0 = insertelement <32 x i16> undef, i16 %val0, i16 0
+  %res1 = insertelement <32 x i16> %res0, i16 %val1, i16 1
+  %res3 = insertelement <32 x i16> %res1, i16     0, i16 3
+  %resE = insertelement <32 x i16> %res3, i16     0, i16 14
+  %resF = insertelement <32 x i16> %resE, i16     0, i16 15
+  %resG = insertelement <32 x i16> %resF, i16     0, i16 16
+  %resH = insertelement <32 x i16> %resG, i16     0, i16 17
+  ret <32 x i16> %resH
+}
+
+; i8 loads from elements 1, 2, 4 and 8 (intervening lanes undef) merge into
+; a single 64-bit load covering bytes 1..8; the explicitly zeroed lanes
+; (14-17 and 63) require zeroing the upper portions.  As with the i16 cases,
+; the AVX512F run stops at two ymm halves while AVX512BW forms a zmm.
+define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
+; AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
+  %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 2
+  %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 4
+  %ptr7 = getelementptr inbounds i8, i8* %ptr, i64 8
+  %val0 = load i8, i8* %ptr0
+  %val1 = load i8, i8* %ptr1
+  %val3 = load i8, i8* %ptr3
+  %val7 = load i8, i8* %ptr7
+  %res0  = insertelement <64 x i8> undef,  i8 %val0, i8 0
+  %res1  = insertelement <64 x i8> %res0,  i8 %val1, i8 1
+  %res3  = insertelement <64 x i8> %res1,  i8 %val3, i8 3
+  %res7  = insertelement <64 x i8> %res3,  i8 %val7, i8 7
+  %res14 = insertelement <64 x i8> %res7,  i8     0, i8 14
+  %res15 = insertelement <64 x i8> %res14, i8     0, i8 15
+  %res16 = insertelement <64 x i8> %res15, i8     0, i8 16
+  %res17 = insertelement <64 x i8> %res16, i8     0, i8 17
+  %res63 = insertelement <64 x i8> %res17, i8     0, i8 63
+  ret <64 x i8> %res63
+}
+
+; Same as the previous test but without the load of element 8.  With only
+; bytes 1, 2 and 4 loaded, this revision builds the low xmm with vpxor +
+; three scalar vpinsrb inserts.  NOTE(review): these three loads plus the
+; undef lane could merge into one 32-bit load — not yet done here.
+define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
+; AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT:    vpinsrb $0, 1(%rdi), %xmm0, %xmm1
+; AVX512F-NEXT:    vpinsrb $1, 2(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vpinsrb $3, 4(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpinsrb $0, 1(%rdi), %xmm0, %xmm1
+; AVX512BW-NEXT:    vpinsrb $1, 2(%rdi), %xmm1, %xmm1
+; AVX512BW-NEXT:    vpinsrb $3, 4(%rdi), %xmm1, %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
+  %ptr1 = getelementptr inbounds i8, i8* %ptr, i64 2
+  %ptr3 = getelementptr inbounds i8, i8* %ptr, i64 4
+  %val0 = load i8, i8* %ptr0
+  %val1 = load i8, i8* %ptr1
+  %val3 = load i8, i8* %ptr3
+  %res0  = insertelement <64 x i8> undef,  i8 %val0, i8 0
+  %res1  = insertelement <64 x i8> %res0,  i8 %val1, i8 1
+  %res3  = insertelement <64 x i8> %res1,  i8 %val3, i8 3
+  %res14 = insertelement <64 x i8> %res3,  i8     0, i8 14
+  %res15 = insertelement <64 x i8> %res14, i8     0, i8 15
+  %res16 = insertelement <64 x i8> %res15, i8     0, i8 16
+  %res17 = insertelement <64 x i8> %res16, i8     0, i8 17
+  %res63 = insertelement <64 x i8> %res17, i8     0, i8 63
+  ret <64 x i8> %res63
+}




More information about the llvm-commits mailing list