[llvm] r307028 - [x86] auto-generate complete checks for tests; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 3 08:04:05 PDT 2017


Author: spatel
Date: Mon Jul  3 08:04:05 2017
New Revision: 307028

URL: http://llvm.org/viewvc/llvm-project?rev=307028&view=rev
Log:
[x86] auto-generate complete checks for tests; NFC

These all used 'CHECK-NOT', which isn't necessary if we have complete checks.
There were also several over-specifications in the RUN params, such as CPU model or OS requirements.
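
For reference, assertions like these are regenerated by running the update script
against a built llc; a typical invocation (a sketch, assuming llc is on PATH or a
build tree exists at ./build) looks like:

  $ utils/update_llc_test_checks.py --llc-binary=./build/bin/llc \
      test/CodeGen/X86/avx-cmp.ll

The script executes each RUN line and rewrites the CHECK lines with the complete
expected assembly, so the CHECK-LABEL/CHECK-NEXT chain already pins down every
instruction in each function body; a stray vucomiss could not sneak in, which is
what makes the old standalone CHECK-NOT lines redundant.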

Modified:
    llvm/trunk/test/CodeGen/X86/avx-cmp.ll
    llvm/trunk/test/CodeGen/X86/avx-load-store.ll
    llvm/trunk/test/CodeGen/X86/avx-unpack.ll
    llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll

Modified: llvm/trunk/test/CodeGen/X86/avx-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cmp.ll?rev=307028&r1=307027&r2=307028&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cmp.ll Mon Jul  3 08:04:05 2017
@@ -1,25 +1,59 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
 
-; CHECK: vcmpltps %ymm
-; CHECK-NOT: vucomiss
-define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind readnone {
+define <8 x i32> @cmp00(<8 x float> %a, <8 x float> %b) nounwind {
+; CHECK-LABEL: cmp00:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpltps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = fcmp olt <8 x float> %a, %b
   %s = sext <8 x i1> %bincmp to <8 x i32>
   ret <8 x i32> %s
 }
 
-; CHECK: vcmpltpd %ymm
-; CHECK-NOT: vucomisd
-define <4 x i64> @cmp01(<4 x double> %a, <4 x double> %b) nounwind readnone {
+define <4 x i64> @cmp01(<4 x double> %a, <4 x double> %b) nounwind {
+; CHECK-LABEL: cmp01:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vcmpltpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = fcmp olt <4 x double> %a, %b
   %s = sext <4 x i1> %bincmp to <4 x i64>
   ret <4 x i64> %s
 }
 
-declare void @scale() nounwind uwtable
+declare void @scale() nounwind
 
-; CHECK: vucomisd
-define void @render() nounwind uwtable {
+define void @render() nounwind {
+; CHECK-LABEL: render:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    jne .LBB2_6
+; CHECK-NEXT:  # BB#1: # %for.cond5.preheader
+; CHECK-NEXT:    xorl %ebx, %ebx
+; CHECK-NEXT:    jmp .LBB2_2
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB2_5: # %if.then
+; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT:    callq scale
+; CHECK-NEXT:  .LBB2_2: # %for.cond5
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    testb %bl, %bl
+; CHECK-NEXT:    jne .LBB2_2
+; CHECK-NEXT:  # BB#3: # %for.cond5
+; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT:    testb %bl, %bl
+; CHECK-NEXT:    je .LBB2_2
+; CHECK-NEXT:  # BB#4: # %for.body33
+; CHECK-NEXT:    # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT:    vucomisd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT:    jne .LBB2_5
+; CHECK-NEXT:    jp .LBB2_5
+; CHECK-NEXT:    jmp .LBB2_2
+; CHECK-NEXT:  .LBB2_6: # %for.end52
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    retq
 entry:
   br i1 undef, label %for.cond5, label %for.end52
 
@@ -42,89 +76,113 @@ for.end52:
   ret void
 }
 
-; CHECK: vextractf128  $1
-; CHECK: vextractf128  $1
-; CHECK-NEXT: vpcmpgtd  %xmm
-; CHECK-NEXT: vpcmpgtd  %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <8 x i32> @int256-cmp(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+define <8 x i32> @int256_cmp(<8 x i32> %i, <8 x i32> %j) nounwind {
+; CHECK-LABEL: int256_cmp:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT:    vpcmpgtd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = icmp slt <8 x i32> %i, %j
   %x = sext <8 x i1> %bincmp to <8 x i32>
   ret <8 x i32> %x
 }
 
-; CHECK: vextractf128  $1
-; CHECK: vextractf128  $1
-; CHECK-NEXT: vpcmpgtq  %xmm
-; CHECK-NEXT: vpcmpgtq  %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <4 x i64> @v4i64-cmp(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+define <4 x i64> @v4i64_cmp(<4 x i64> %i, <4 x i64> %j) nounwind {
+; CHECK-LABEL: v4i64_cmp:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = icmp slt <4 x i64> %i, %j
   %x = sext <4 x i1> %bincmp to <4 x i64>
   ret <4 x i64> %x
 }
 
-; CHECK: vextractf128  $1
-; CHECK: vextractf128  $1
-; CHECK-NEXT: vpcmpgtw  %xmm
-; CHECK-NEXT: vpcmpgtw  %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <16 x i16> @v16i16-cmp(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+define <16 x i16> @v16i16_cmp(<16 x i16> %i, <16 x i16> %j) nounwind {
+; CHECK-LABEL: v16i16_cmp:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = icmp slt <16 x i16> %i, %j
   %x = sext <16 x i1> %bincmp to <16 x i16>
   ret <16 x i16> %x
 }
 
-; CHECK: vextractf128  $1
-; CHECK: vextractf128  $1
-; CHECK-NEXT: vpcmpgtb  %xmm
-; CHECK-NEXT: vpcmpgtb  %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <32 x i8> @v32i8-cmp(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+define <32 x i8> @v32i8_cmp(<32 x i8> %i, <32 x i8> %j) nounwind {
+; CHECK-LABEL: v32i8_cmp:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT:    vpcmpgtb %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = icmp slt <32 x i8> %i, %j
   %x = sext <32 x i1> %bincmp to <32 x i8>
   ret <32 x i8> %x
 }
 
-; CHECK: vextractf128  $1
-; CHECK: vextractf128  $1
-; CHECK-NEXT: vpcmpeqd  %xmm
-; CHECK-NEXT: vpcmpeqd  %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <8 x i32> @int256-cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind {
+; CHECK-LABEL: int256_cmpeq:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = icmp eq <8 x i32> %i, %j
   %x = sext <8 x i1> %bincmp to <8 x i32>
   ret <8 x i32> %x
 }
 
-; CHECK: vextractf128  $1
-; CHECK: vextractf128  $1
-; CHECK-NEXT: vpcmpeqq  %xmm
-; CHECK-NEXT: vpcmpeqq  %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <4 x i64> @v4i64-cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind {
+; CHECK-LABEL: v4i64_cmpeq:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT:    vpcmpeqq %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = icmp eq <4 x i64> %i, %j
   %x = sext <4 x i1> %bincmp to <4 x i64>
   ret <4 x i64> %x
 }
 
-; CHECK: vextractf128  $1
-; CHECK: vextractf128  $1
-; CHECK-NEXT: vpcmpeqw  %xmm
-; CHECK-NEXT: vpcmpeqw  %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <16 x i16> @v16i16-cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
+define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind {
+; CHECK-LABEL: v16i16_cmpeq:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT:    vpcmpeqw %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = icmp eq <16 x i16> %i, %j
   %x = sext <16 x i1> %bincmp to <16 x i16>
   ret <16 x i16> %x
 }
 
-; CHECK: vextractf128  $1
-; CHECK: vextractf128  $1
-; CHECK-NEXT: vpcmpeqb  %xmm
-; CHECK-NEXT: vpcmpeqb  %xmm
-; CHECK-NEXT: vinsertf128 $1
-define <32 x i8> @v32i8-cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind {
+; CHECK-LABEL: v32i8_cmpeq:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT:    vpcmpeqb %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %bincmp = icmp eq <32 x i8> %i, %j
   %x = sext <32 x i1> %bincmp to <32 x i8>
   ret <32 x i8> %x
@@ -132,17 +190,28 @@ define <32 x i8> @v32i8-cmpeq(<32 x i8>
 
 ;; Scalar comparison
 
-; CHECK: scalarcmpA
-; CHECK: vcmpeqsd
 define i32 @scalarcmpA() uwtable ssp {
+; CHECK-LABEL: scalarcmpA:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vcmpeqsd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovq %xmm0, %rax
+; CHECK-NEXT:    andl $1, %eax
+; CHECK-NEXT:    # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; CHECK-NEXT:    retq
   %cmp29 = fcmp oeq double undef, 0.000000e+00
   %res = zext i1 %cmp29 to i32
   ret i32 %res
 }
 
-; CHECK: scalarcmpB
-; CHECK: vcmpeqss
 define i32 @scalarcmpB() uwtable ssp {
+; CHECK-LABEL: scalarcmpB:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vcmpeqss %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovd %xmm0, %eax
+; CHECK-NEXT:    andl $1, %eax
+; CHECK-NEXT:    retq
   %cmp29 = fcmp oeq float undef, 0.000000e+00
   %res = zext i1 %cmp29 to i32
   ret i32 %res

Modified: llvm/trunk/test/CodeGen/X86/avx-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-load-store.ll?rev=307028&r1=307027&r2=307028&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-load-store.ll Mon Jul  3 08:04:05 2017
@@ -1,13 +1,62 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
-; RUN: llc -O0 < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -check-prefix=CHECK_O0
-
-; CHECK: vmovaps
-; CHECK: vmovaps
-; CHECK: vmovaps
-; CHECK: vmovaps
-; CHECK: vmovaps
-; CHECK: vmovaps
-define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s
+; RUN: llc -O0 < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s -check-prefix=CHECK_O0
+
+define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind {
+; CHECK-LABEL: test_256_load:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    pushq %r15
+; CHECK-NEXT:    pushq %r14
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    subq $96, %rsp
+; CHECK-NEXT:    movq %rdx, %r14
+; CHECK-NEXT:    movq %rsi, %r15
+; CHECK-NEXT:    movq %rdi, %rbx
+; CHECK-NEXT:    vmovaps (%rbx), %ymm0
+; CHECK-NEXT:    vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT:    vmovaps (%r15), %ymm1
+; CHECK-NEXT:    vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT:    vmovaps (%r14), %ymm2
+; CHECK-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
+; CHECK-NEXT:    callq dummy
+; CHECK-NEXT:    vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    vmovaps %ymm0, (%rbx)
+; CHECK-NEXT:    vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    vmovaps %ymm0, (%r15)
+; CHECK-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    vmovaps %ymm0, (%r14)
+; CHECK-NEXT:    addq $96, %rsp
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    popq %r14
+; CHECK-NEXT:    popq %r15
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+;
+; CHECK_O0-LABEL: test_256_load:
+; CHECK_O0:       # BB#0: # %entry
+; CHECK_O0-NEXT:    subq $152, %rsp
+; CHECK_O0-NEXT:    vmovapd (%rdi), %ymm0
+; CHECK_O0-NEXT:    vmovaps (%rsi), %ymm1
+; CHECK_O0-NEXT:    vmovdqa (%rdx), %ymm2
+; CHECK_O0-NEXT:    vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK_O0-NEXT:    vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK_O0-NEXT:    vmovups %ymm2, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK_O0-NEXT:    movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK_O0-NEXT:    movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK_O0-NEXT:    movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; CHECK_O0-NEXT:    callq dummy
+; CHECK_O0-NEXT:    movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; CHECK_O0-NEXT:    vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
+; CHECK_O0-NEXT:    vmovapd %ymm0, (%rdx)
+; CHECK_O0-NEXT:    movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; CHECK_O0-NEXT:    vmovups {{[0-9]+}}(%rsp), %ymm1 # 32-byte Reload
+; CHECK_O0-NEXT:    vmovaps %ymm1, (%rsi)
+; CHECK_O0-NEXT:    movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; CHECK_O0-NEXT:    vmovups {{[0-9]+}}(%rsp), %ymm2 # 32-byte Reload
+; CHECK_O0-NEXT:    vmovdqa %ymm2, (%rdi)
+; CHECK_O0-NEXT:    addq $152, %rsp
+; CHECK_O0-NEXT:    vzeroupper
+; CHECK_O0-NEXT:    retq
 entry:
   %0 = bitcast double* %d to <4 x double>*
   %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
@@ -27,62 +76,115 @@ declare void @dummy(<4 x double>, <8 x f
 ;; The two tests below check that we must fold load + scalar_to_vector
 ;; + ins_subvec+ zext into only a single vmovss or vmovsd or vinsertps from memory
 
-; CHECK: mov00
 define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
+; CHECK-LABEL: mov00:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    retq
+;
+; CHECK_O0-LABEL: mov00:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK_O0-NEXT:    # implicit-def: %YMM1
+; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm1
+; CHECK_O0-NEXT:    vxorps %ymm2, %ymm2, %ymm2
+; CHECK_O0-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3,4,5,6,7]
+; CHECK_O0-NEXT:    retq
   %val = load float, float* %ptr
-; CHECK: vmovss (%
   %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
   ret <8 x float> %i0
-; CHECK: ret
 }
 
-; CHECK: mov01
 define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
+; CHECK-LABEL: mov01:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    retq
+;
+; CHECK_O0-LABEL: mov01:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK_O0-NEXT:    # implicit-def: %YMM1
+; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm1
+; CHECK_O0-NEXT:    vxorps %ymm2, %ymm2, %ymm2
+; CHECK_O0-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3]
+; CHECK_O0-NEXT:    retq
   %val = load double, double* %ptr
-; CHECK: vmovsd (%
   %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
   ret <4 x double> %i0
-; CHECK: ret
 }
 
-; CHECK: vmovaps  %ymm
 define void @storev16i16(<16 x i16> %a) nounwind {
+; CHECK-LABEL: storev16i16:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovaps %ymm0, (%rax)
+;
+; CHECK_O0-LABEL: storev16i16:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    # implicit-def: %RAX
+; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rax)
   store <16 x i16> %a, <16 x i16>* undef, align 32
   unreachable
 }
 
-; CHECK: storev16i16_01
-; CHECK: vextractf128
-; CHECK: vmovups  %xmm
 define void @storev16i16_01(<16 x i16> %a) nounwind {
+; CHECK-LABEL: storev16i16_01:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rax)
+; CHECK-NEXT:    vmovups %xmm0, (%rax)
+;
+; CHECK_O0-LABEL: storev16i16_01:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    # implicit-def: %RAX
+; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rax)
   store <16 x i16> %a, <16 x i16>* undef, align 4
   unreachable
 }
 
-; CHECK: storev32i8
-; CHECK: vmovaps  %ymm
 define void @storev32i8(<32 x i8> %a) nounwind {
+; CHECK-LABEL: storev32i8:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovaps %ymm0, (%rax)
+;
+; CHECK_O0-LABEL: storev32i8:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    # implicit-def: %RAX
+; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rax)
   store <32 x i8> %a, <32 x i8>* undef, align 32
   unreachable
 }
 
-; CHECK: storev32i8_01
-; CHECK: vextractf128
-; CHECK: vmovups  %xmm
 define void @storev32i8_01(<32 x i8> %a) nounwind {
+; CHECK-LABEL: storev32i8_01:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rax)
+; CHECK-NEXT:    vmovups %xmm0, (%rax)
+;
+; CHECK_O0-LABEL: storev32i8_01:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    # implicit-def: %RAX
+; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rax)
   store <32 x i8> %a, <32 x i8>* undef, align 4
   unreachable
 }
 
 ; It is faster to make two saves, if the data is already in XMM registers. For
 ; example, after making an integer operation.
-; CHECK: _double_save
-; CHECK-NOT: vinsertf128 $1
-; CHECK-NOT: vinsertf128 $0
-; CHECK: vmovaps %xmm
-; CHECK: vmovaps %xmm
 define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
-entry:
+; CHECK-LABEL: double_save:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovaps %xmm1, 16(%rdi)
+; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
+; CHECK-NEXT:    retq
+;
+; CHECK_O0-LABEL: double_save:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    # implicit-def: %YMM2
+; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm2
+; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm2
+; CHECK_O0-NEXT:    vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT:    vzeroupper
+; CHECK_O0-NEXT:    retq
   %Z = shufflevector <4 x i32>%A, <4 x i32>%B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   store <8 x i32> %Z, <8 x i32>* %P, align 16
   ret void
@@ -90,60 +192,127 @@ entry:
 
 declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind
 
-; CHECK_O0: _f_f
-; CHECK-O0: vmovss LCPI
-; CHECK-O0: vxorps  %xmm
-; CHECK-O0: vmovss %xmm
 define void @f_f() nounwind {
+; CHECK-LABEL: f_f:
+; CHECK:       # BB#0: # %allocas
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    jne .LBB8_2
+; CHECK-NEXT:  # BB#1: # %cif_mask_all
+; CHECK-NEXT:  .LBB8_2: # %cif_mask_mixed
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    jne .LBB8_4
+; CHECK-NEXT:  # BB#3: # %cif_mixed_test_all
+; CHECK-NEXT:    movl $-1, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm0
+; CHECK-NEXT:    vmaskmovps %ymm0, %ymm0, (%rax)
+; CHECK-NEXT:  .LBB8_4: # %cif_mixed_test_any_check
+;
+; CHECK_O0-LABEL: f_f:
+; CHECK_O0:       # BB#0: # %allocas
+; CHECK_O0-NEXT:    # implicit-def: %AL
+; CHECK_O0-NEXT:    testb $1, %al
+; CHECK_O0-NEXT:    jne .LBB8_1
+; CHECK_O0-NEXT:    jmp .LBB8_2
+; CHECK_O0-NEXT:  .LBB8_1: # %cif_mask_all
+; CHECK_O0-NEXT:  .LBB8_2: # %cif_mask_mixed
+; CHECK_O0-NEXT:    # implicit-def: %AL
+; CHECK_O0-NEXT:    testb $1, %al
+; CHECK_O0-NEXT:    jne .LBB8_3
+; CHECK_O0-NEXT:    jmp .LBB8_4
+; CHECK_O0-NEXT:  .LBB8_3: # %cif_mixed_test_all
+; CHECK_O0-NEXT:    movl $-1, %eax
+; CHECK_O0-NEXT:    vmovd %eax, %xmm0
+; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm1
+; CHECK_O0-NEXT:    # implicit-def: %RCX
+; CHECK_O0-NEXT:    # implicit-def: %YMM2
+; CHECK_O0-NEXT:    vmaskmovps %ymm2, %ymm1, (%rcx)
+; CHECK_O0-NEXT:  .LBB8_4: # %cif_mixed_test_any_check
 allocas:
   br i1 undef, label %cif_mask_all, label %cif_mask_mixed
 
-cif_mask_all:                                     ; preds = %allocas
+cif_mask_all:
   unreachable
 
-cif_mask_mixed:                                   ; preds = %allocas
+cif_mask_mixed:
   br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check
 
-cif_mixed_test_all:                               ; preds = %cif_mask_mixed
+cif_mixed_test_all:
   call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> <i32 -1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef) nounwind
   unreachable
 
-cif_mixed_test_any_check:                         ; preds = %cif_mask_mixed
+cif_mixed_test_any_check:
   unreachable
 }
 
-; CHECK: add8i32
-; CHECK: vmovups
-; CHECK: vmovups
-; CHECK-NOT: vinsertf128
-; CHECK-NOT: vextractf128
-; CHECK: vmovups
-; CHECK: vmovups
 define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
+; CHECK-LABEL: add8i32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovups (%rsi), %xmm0
+; CHECK-NEXT:    vmovups 16(%rsi), %xmm1
+; CHECK-NEXT:    vmovups %xmm1, 16(%rdi)
+; CHECK-NEXT:    vmovups %xmm0, (%rdi)
+; CHECK-NEXT:    retq
+;
+; CHECK_O0-LABEL: add8i32:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    vmovdqu (%rsi), %xmm0
+; CHECK_O0-NEXT:    vmovdqu 16(%rsi), %xmm1
+; CHECK_O0-NEXT:    # implicit-def: %YMM2
+; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm2
+; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm2
+; CHECK_O0-NEXT:    vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT:    vzeroupper
+; CHECK_O0-NEXT:    retq
   %b = load <8 x i32>, <8 x i32>* %bp, align 1
   %x = add <8 x i32> zeroinitializer, %b
   store <8 x i32> %x, <8 x i32>* %ret, align 1
   ret void
 }
 
-; CHECK: add4i64a64
-; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
-; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
 define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+; CHECK-LABEL: add4i64a64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovaps (%rsi), %ymm0
+; CHECK-NEXT:    vmovaps %ymm0, (%rdi)
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+;
+; CHECK_O0-LABEL: add4i64a64:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    vmovaps (%rsi), %ymm0
+; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rdi)
+; CHECK_O0-NEXT:    vzeroupper
+; CHECK_O0-NEXT:    retq
   %b = load <4 x i64>, <4 x i64>* %bp, align 64
   %x = add <4 x i64> zeroinitializer, %b
   store <4 x i64> %x, <4 x i64>* %ret, align 64
   ret void
 }
 
-; CHECK: add4i64a16
-; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
-; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
-; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
-; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
 define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
+; CHECK-LABEL: add4i64a16:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovaps (%rsi), %xmm0
+; CHECK-NEXT:    vmovaps 16(%rsi), %xmm1
+; CHECK-NEXT:    vmovaps %xmm1, 16(%rdi)
+; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
+; CHECK-NEXT:    retq
+;
+; CHECK_O0-LABEL: add4i64a16:
+; CHECK_O0:       # BB#0:
+; CHECK_O0-NEXT:    vmovdqa (%rsi), %xmm0
+; CHECK_O0-NEXT:    vmovdqa 16(%rsi), %xmm1
+; CHECK_O0-NEXT:    # implicit-def: %YMM2
+; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm2
+; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm2
+; CHECK_O0-NEXT:    vmovdqu %ymm2, (%rdi)
+; CHECK_O0-NEXT:    vzeroupper
+; CHECK_O0-NEXT:    retq
   %b = load <4 x i64>, <4 x i64>* %bp, align 16
   %x = add <4 x i64> zeroinitializer, %b
   store <4 x i64> %x, <4 x i64>* %ret, align 16
   ret void
 }
+

Modified: llvm/trunk/test/CodeGen/X86/avx-unpack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-unpack.ll?rev=307028&r1=307027&r2=307028&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-unpack.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-unpack.ll Mon Jul  3 08:04:05 2017
@@ -1,57 +1,84 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
 
-; CHECK: vunpckhps
 define <8 x float> @unpackhips(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhips:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
   ret <8 x float> %shuffle.i
 }
 
-; CHECK: vunpckhpd
 define <4 x double> @unpackhipd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhipd:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
   ret <4 x double> %shuffle.i
 }
 
-; CHECK: vunpcklps
 define <8 x float> @unpacklops(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklops:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
   ret <8 x float> %shuffle.i
 }
 
-; CHECK: vunpcklpd
 define <4 x double> @unpacklopd(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklopd:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
   ret <4 x double> %shuffle.i
 }
 
-; CHECK-NOT: vunpcklps %ymm
-define <8 x float> @unpacklops-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
-entry:
+define <8 x float> @unpacklops_not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
+; CHECK-LABEL: unpacklops_not:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhps {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; CHECK-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   ret <8 x float> %shuffle.i
 }
 
-; CHECK-NOT: vunpcklpd %ymm
-define <4 x double> @unpacklopd-not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
-entry:
+define <4 x double> @unpacklopd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
+; CHECK-LABEL: unpacklopd_not:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
+; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   ret <4 x double> %shuffle.i
 }
 
-; CHECK-NOT: vunpckhps %ymm
-define <8 x float> @unpackhips-not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
-entry:
+define <8 x float> @unpackhips_not(<8 x float> %src1, <8 x float> %src2) nounwind uwtable readnone ssp {
+; CHECK-LABEL: unpackhips_not:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[u,2,u,3,u,4,u,5]
+; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,u,3,u,4,u,5,u]
+; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x float> %src1, <8 x float> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13>
   ret <8 x float> %shuffle.i
 }
 
-; CHECK-NOT: vunpckhpd %ymm
-define <4 x double> @unpackhipd-not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
-entry:
+define <4 x double> @unpackhipd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
+; CHECK-LABEL: unpackhipd_not:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
+; CHECK-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
   ret <4 x double> %shuffle.i
 }
@@ -60,102 +87,135 @@ entry:
 ;;;; Unpack versions using the fp unit for int unpacking
 ;;;;
 
-; CHECK: vunpckhps
 define <8 x i32> @unpackhips1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhips1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
   ret <8 x i32> %shuffle.i
 }
 
-; CHECK: vunpckhps (%
 define <8 x i32> @unpackhips2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhips2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovaps (%rdi), %ymm0
+; CHECK-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
+; CHECK-NEXT:    retq
   %a = load <8 x i32>, <8 x i32>* %src1
   %b = load <8 x i32>, <8 x i32>* %src2
   %shuffle.i = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
   ret <8 x i32> %shuffle.i
 }
 
-; CHECK: vunpckhpd
 define <4 x i64> @unpackhipd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhipd1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
   ret <4 x i64> %shuffle.i
 }
 
-; CHECK: vunpckhpd (%
 define <4 x i64> @unpackhipd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhipd2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovapd (%rdi), %ymm0
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    retq
   %a = load <4 x i64>, <4 x i64>* %src1
   %b = load <4 x i64>, <4 x i64>* %src2
   %shuffle.i = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
   ret <4 x i64> %shuffle.i
 }
 
-; CHECK: vunpcklps
 define <8 x i32> @unpacklops1(<8 x i32> %src1, <8 x i32> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklops1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <8 x i32> %src1, <8 x i32> %src2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
   ret <8 x i32> %shuffle.i
 }
 
-; CHECK: vunpcklps (%
 define <8 x i32> @unpacklops2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklops2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovaps (%rdi), %ymm0
+; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; CHECK-NEXT:    retq
   %a = load <8 x i32>, <8 x i32>* %src1
   %b = load <8 x i32>, <8 x i32>* %src2
   %shuffle.i = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
   ret <8 x i32> %shuffle.i
 }
 
-; CHECK: vunpcklpd
 define <4 x i64> @unpacklopd1(<4 x i64> %src1, <4 x i64> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklopd1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x i64> %src1, <4 x i64> %src2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
   ret <4 x i64> %shuffle.i
 }
 
-; CHECK: vunpcklpd (%
 define <4 x i64> @unpacklopd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklopd2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vmovapd (%rdi), %ymm0
+; CHECK-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
+; CHECK-NEXT:    retq
   %a = load <4 x i64>, <4 x i64>* %src1
   %b = load <4 x i64>, <4 x i64>* %src2
   %shuffle.i = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
   ret <4 x i64> %shuffle.i
 }
 
-; CHECK: vpunpckhwd
-; CHECK: vpunpckhwd
-; CHECK: vinsertf128
 define <16 x i16> @unpackhwd_undef(<16 x i16> %src1) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhwd_undef:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <16 x i16> %src1, <16 x i16> %src1, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
   ret <16 x i16> %shuffle.i
 }
 
-; CHECK: vpunpcklwd
-; CHECK: vpunpcklwd
-; CHECK: vinsertf128
 define <16 x i16> @unpacklwd_undef(<16 x i16> %src1) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklwd_undef:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3]
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <16 x i16> %src1, <16 x i16> %src1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
   ret <16 x i16> %shuffle.i
 }
 
-; CHECK: vpunpckhbw
-; CHECK: vpunpckhbw
-; CHECK: vinsertf128
 define <32 x i8> @unpackhbw_undef(<32 x i8> %src1, <32 x i8> %src2) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpackhbw_undef:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <32 x i8> %src1, <32 x i8> %src1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
   ret <32 x i8> %shuffle.i
 }
 
-; CHECK: vpunpcklbw
-; CHECK: vpunpcklbw
-; CHECK: vinsertf128
 define <32 x i8> @unpacklbw_undef(<32 x i8> %src1) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: unpacklbw_undef:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <32 x i8> %src1, <32 x i8> %src1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
   ret <32 x i8> %shuffle.i
 }
+

Modified: llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll?rev=307028&r1=307027&r2=307028&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll Mon Jul  3 08:04:05 2017
@@ -1,30 +1,37 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
 
-; CHECK-LABEL: A:
-; CHECK-NOT: vunpck
-; CHECK: vinsertf128 $1
 define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: A:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 8, i32 8, i32 8, i32 8, i32 0, i32 1, i32 2, i32 3>
   ret <8 x float> %shuffle
 }
 
-; CHECK-LABEL: B:
-; CHECK-NOT: vunpck
-; CHECK: vinsertf128 $1
 define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
-entry:
+; CHECK-LABEL: B:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 1>
   ret <4 x double> %shuffle
 }
 
 declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
-
 declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
 
-; Just check that no crash happens
-; CHECK-LABEL: _insert_crash:
 define void @insert_crash() nounwind {
+; CHECK-LABEL: insert_crash:
+; CHECK:       # BB#0: # %allocas
+; CHECK-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vminpd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vminsd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; CHECK-NEXT:    vmovups %xmm0, (%rax)
+; CHECK-NEXT:    retq
 allocas:
   %v1.i.i451 = shufflevector <4 x double> zeroinitializer, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
   %ret_0a.i.i.i452 = shufflevector <4 x double> %v1.i.i451, <4 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -40,72 +47,87 @@ allocas:
 
 ;; DAG Combine must remove useless vinsertf128 instructions
 
-; CHECK-LABEL: DAGCombineA:
-; CHECK-NOT: vinsertf128 $1
 define <4 x i32> @DAGCombineA(<4 x i32> %v1) nounwind readonly {
-  %1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %2 = shufflevector <8 x i32> %1, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  ret <4 x i32> %2
+; CHECK-LABEL: DAGCombineA:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    retq
+  %t1 = shufflevector <4 x i32> %v1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %t2 = shufflevector <8 x i32> %t1, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %t2
 }
 
-; CHECK-LABEL: DAGCombineB:
-; CHECK: vpaddd %xmm
-; CHECK-NOT: vinsertf128  $1
-; CHECK: vpaddd %xmm
 define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly {
-  %1 = add <8 x i32> %v1, %v2
-  %2 = add <8 x i32> %1, %v1
-  ret <8 x i32> %2
+; CHECK-LABEL: DAGCombineB:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; CHECK-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
+; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
+  %t1 = add <8 x i32> %v1, %v2
+  %t2 = add <8 x i32> %t1, %v1
+  ret <8 x i32> %t2
 }
 
-; CHECK-LABEL: insert_undef_pd:
 define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) {
-; CHECK: vmovaps	%ymm1, %ymm0
+; CHECK-LABEL: insert_undef_pd:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
+; CHECK-NEXT:    retq
 %res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> undef, <2 x double> %a1, i8 0)
 ret <4 x double> %res
 }
 declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind readnone
 
-
-; CHECK-LABEL: insert_undef_ps:
 define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) {
-; CHECK: vmovaps	%ymm1, %ymm0
+; CHECK-LABEL: insert_undef_ps:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
+; CHECK-NEXT:    retq
 %res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %a1, i8 0)
 ret <8 x float> %res
 }
 declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8) nounwind readnone
 
-
-; CHECK-LABEL: insert_undef_si:
 define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) {
-; CHECK: vmovaps	%ymm1, %ymm0
+; CHECK-LABEL: insert_undef_si:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT:    vmovaps %ymm1, %ymm0
+; CHECK-NEXT:    retq
 %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> undef, <4 x i32> %a1, i8 0)
 ret <8 x i32> %res
 }
 declare <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32>, <4 x i32>, i8) nounwind readnone
 
 ; rdar://10643481
-; CHECK-LABEL: vinsertf128_combine:
 define <8 x float> @vinsertf128_combine(float* nocapture %f) nounwind uwtable readonly ssp {
-; CHECK-NOT: vmovaps
-; CHECK: vinsertf128
-entry:
+; CHECK-LABEL: vinsertf128_combine:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %add.ptr = getelementptr inbounds float, float* %f, i64 4
-  %0 = bitcast float* %add.ptr to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 16
-  %2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
-  ret <8 x float> %2
+  %t0 = bitcast float* %add.ptr to <4 x float>*
+  %t1 = load <4 x float>, <4 x float>* %t0, align 16
+  %t2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %t1, i8 1)
+  ret <8 x float> %t2
 }
 
 ; rdar://11076953
-; CHECK-LABEL: vinsertf128_ucombine:
 define <8 x float> @vinsertf128_ucombine(float* nocapture %f) nounwind uwtable readonly ssp {
-; CHECK-NOT: vmovups
-; CHECK: vinsertf128
-entry:
+; CHECK-LABEL: vinsertf128_ucombine:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %add.ptr = getelementptr inbounds float, float* %f, i64 4
-  %0 = bitcast float* %add.ptr to <4 x float>*
-  %1 = load <4 x float>, <4 x float>* %0, align 8
-  %2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
-  ret <8 x float> %2
+  %t0 = bitcast float* %add.ptr to <4 x float>*
+  %t1 = load <4 x float>, <4 x float>* %t0, align 8
+  %t2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %t1, i8 1)
+  ret <8 x float> %t2
 }
+