[llvm] r369469 - [X86] Autogenerate vec_* tests. NFC

Amaury Sechet via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 20 16:11:29 PDT 2019


Author: deadalnix
Date: Tue Aug 20 16:11:29 2019
New Revision: 369469

URL: http://llvm.org/viewvc/llvm-project?rev=369469&view=rev
Log:
[X86] Autogenerate vec_* tests. NFC

Modified:
    llvm/trunk/test/CodeGen/X86/vec_align.ll
    llvm/trunk/test/CodeGen/X86/vec_align_i256.ll
    llvm/trunk/test/CodeGen/X86/vec_anyext.ll
    llvm/trunk/test/CodeGen/X86/vec_call.ll
    llvm/trunk/test/CodeGen/X86/vec_round.ll
    llvm/trunk/test/CodeGen/X86/vec_shuf-insert.ll
    llvm/trunk/test/CodeGen/X86/vec_split.ll
    llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll
    llvm/trunk/test/CodeGen/X86/vec_udiv_to_shift.ll
    llvm/trunk/test/CodeGen/X86/vec_zero-2.ll

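(Editor's note, for reference only and not part of the commit: per the NOTE lines added below, the CHECK assertions were produced by utils/update_llc_test_checks.py. A sketch of the usual invocation follows; the exact working directory, file list, and flags used for this commit are assumptions, not taken from the log.)

    # From the llvm/trunk source directory, with a freshly built llc on PATH
    # (or pointed at explicitly, e.g. --llc-binary=build/bin/llc):
    ./utils/update_llc_test_checks.py \
        test/CodeGen/X86/vec_align_i256.ll \
        test/CodeGen/X86/vec_round.ll \
        test/CodeGen/X86/vec_shuf-insert.ll \
        test/CodeGen/X86/vec_split.ll

The script runs llc as specified by each test's RUN line and rewrites the ; CHECK blocks in the file, which is why the hand-written CHECK patterns below are replaced by full CHECK-LABEL/CHECK-NEXT sequences.
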
Modified: llvm/trunk/test/CodeGen/X86/vec_align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_align.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_align.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_align.ll Tue Aug 20 16:11:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mcpu=yonah -relocation-model=static | grep movaps | count 2
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
@@ -20,7 +21,7 @@ define %f4 @test2() nounwind {
 	%Xp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 1
 	%Yp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 2
 	%Zp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 3
-	
+
 	%W = load float, float* %Wp
 	%X = load float, float* %Xp
 	%Y = load float, float* %Yp

Modified: llvm/trunk/test/CodeGen/X86/vec_align_i256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_align_i256.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_align_i256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_align_i256.ll Tue Aug 20 16:11:29 2019
@@ -1,14 +1,16 @@
-; RUN: llc < %s -mcpu=corei7-avx | FileCheck %s 
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i686-apple-darwin8"
 
 ; Make sure that we are not generating a movaps because the vector is aligned to 1.
-;CHECK: @foo
-;CHECK: xor
-;CHECK-NEXT: vmovups
-;CHECK-NEXT: ret
 define void @foo() {
+; CHECK-LABEL: foo:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, (%eax)
+; CHECK-NEXT:    retl
   store <16 x i16> zeroinitializer, <16 x i16>* undef, align 1
   ret void
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_anyext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_anyext.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_anyext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_anyext.ll Tue Aug 20 16:11:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64--
 ; PR 9267
 

Modified: llvm/trunk/test/CodeGen/X86/vec_call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_call.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_call.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_call.ll Tue Aug 20 16:11:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mcpu=generic -mattr=+sse2 -mtriple=i686-apple-darwin8 | \
 ; RUN:   grep "subl.*60"
 ; RUN: llc < %s -mcpu=generic -mattr=+sse2 -mtriple=i686-apple-darwin8 | \

Modified: llvm/trunk/test/CodeGen/X86/vec_round.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_round.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_round.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_round.ll Tue Aug 20 16:11:29 2019
@@ -1,14 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mcpu=nehalem -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
 declare void @use(<2 x double>)
 
-; CHECK-LABEL: @test
-; CHECK: callq round
-
 ; Function Attrs: nounwind uwtable
 define void @test() {
+; CHECK-LABEL: test:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    callq round
+; CHECK-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
+; CHECK-NEXT:    callq use
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
 entry:
   %tmp = call <2 x double> @llvm.round.v2f64(<2 x double> undef)
   call void @use(<2 x double> %tmp)

Modified: llvm/trunk/test/CodeGen/X86/vec_shuf-insert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_shuf-insert.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_shuf-insert.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_shuf-insert.ll Tue Aug 20 16:11:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux -mcpu=corei7-avx | FileCheck %s
 
 ; These tests check that an insert_subvector which replaces one of the halves
@@ -7,23 +8,25 @@
 declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8)
 
 define <8 x float> @lower_half(<4 x float> %v1, <4 x float> %v2, <4 x float> %v3) {
+; CHECK-LABEL: lower_half:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $xmm2 killed $xmm2 def $ymm2
+; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm0
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x float> %v1, <4 x float> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %1, <4 x float> %v3, i8 0)
   ret <8 x float> %2
 
-; CHECK-LABEL: lower_half
-; CHECK-NOT: vinsertf128
-; CHECK: vinsertf128 $1, %xmm1, %ymm2, %ymm0
-; CHECK-NEXT: ret
 }
 
 define <8 x float> @upper_half(<4 x float> %v1, <4 x float> %v2, <4 x float> %v3) {
+; CHECK-LABEL: upper_half:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %1 = shufflevector <4 x float> %v1, <4 x float> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %1, <4 x float> %v3, i8 1)
   ret <8 x float> %2
 
-; CHECK-LABEL: upper_half
-; CHECK-NOT: vinsertf128
-; CHECK: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; CHECK-NEXT: ret
 }

Modified: llvm/trunk/test/CodeGen/X86/vec_split.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_split.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_split.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_split.ll Tue Aug 20 16:11:29 2019
@@ -1,19 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-- -mattr=sse4.1 < %s | FileCheck %s -check-prefix=SSE4
 ; RUN: llc -mtriple=x86_64-- -mattr=avx < %s | FileCheck %s -check-prefix=AVX1
 ; RUN: llc -mtriple=x86_64-- -mattr=avx2 < %s | FileCheck %s -check-prefix=AVX2
 
 define <16 x i16> @split16(<16 x i16> %a, <16 x i16> %b, <16 x i8> %__mask) {
 ; SSE4-LABEL: split16:
-; SSE4: pminuw
-; SSE4: pminuw
-; SSE4: ret
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pminuw %xmm2, %xmm0
+; SSE4-NEXT:    pminuw %xmm3, %xmm1
+; SSE4-NEXT:    retq
+;
 ; AVX1-LABEL: split16:
-; AVX1: vpminuw
-; AVX1: vpminuw
-; AVX1: ret
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
 ; AVX2-LABEL: split16:
-; AVX2: vpminuw
-; AVX2: ret
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
   %1 = icmp ult <16 x i16> %a, %b
   %2 = select <16 x i1> %1, <16 x i16> %a, <16 x i16> %b
   ret <16 x i16> %2
@@ -21,21 +30,32 @@ define <16 x i16> @split16(<16 x i16> %a
 
 define <32 x i16> @split32(<32 x i16> %a, <32 x i16> %b, <32 x i8> %__mask) {
 ; SSE4-LABEL: split32:
-; SSE4: pminuw
-; SSE4: pminuw
-; SSE4: pminuw
-; SSE4: pminuw
-; SSE4: ret
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    pminuw %xmm4, %xmm0
+; SSE4-NEXT:    pminuw %xmm5, %xmm1
+; SSE4-NEXT:    pminuw %xmm6, %xmm2
+; SSE4-NEXT:    pminuw %xmm7, %xmm3
+; SSE4-NEXT:    retq
+;
 ; AVX1-LABEL: split32:
-; AVX1: vpminuw
-; AVX1: vpminuw
-; AVX1: vpminuw
-; AVX1: vpminuw
-; AVX1: ret
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpminuw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpminuw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpminuw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
 ; AVX2-LABEL: split32:
-; AVX2: vpminuw
-; AVX2: vpminuw
-; AVX2: ret
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminuw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpminuw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    retq
   %1 = icmp ult <32 x i16> %a, %b
   %2 = select <32 x i1> %1, <32 x i16> %a, <32 x i16> %b
   ret <32 x i16> %2
@@ -44,29 +64,40 @@ define <32 x i16> @split32(<32 x i16> %a
 ; PR19492
 define i128 @split128(<2 x i128> %a, <2 x i128> %b) {
 ; SSE4-LABEL: split128:
-; SSE4: addq
-; SSE4: adcq
-; SSE4: addq
-; SSE4: adcq
-; SSE4: addq
-; SSE4: adcq
-; SSE4: ret
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    movq %rdx, %rax
+; SSE4-NEXT:    addq %r8, %rdi
+; SSE4-NEXT:    adcq %r9, %rsi
+; SSE4-NEXT:    addq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT:    addq %rdi, %rax
+; SSE4-NEXT:    adcq %rsi, %rcx
+; SSE4-NEXT:    movq %rcx, %rdx
+; SSE4-NEXT:    retq
+;
 ; AVX1-LABEL: split128:
-; AVX1: addq
-; AVX1: adcq
-; AVX1: addq
-; AVX1: adcq
-; AVX1: addq
-; AVX1: adcq
-; AVX1: ret
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movq %rdx, %rax
+; AVX1-NEXT:    addq %r8, %rdi
+; AVX1-NEXT:    adcq %r9, %rsi
+; AVX1-NEXT:    addq {{[0-9]+}}(%rsp), %rax
+; AVX1-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
+; AVX1-NEXT:    addq %rdi, %rax
+; AVX1-NEXT:    adcq %rsi, %rcx
+; AVX1-NEXT:    movq %rcx, %rdx
+; AVX1-NEXT:    retq
+;
 ; AVX2-LABEL: split128:
-; AVX2: addq
-; AVX2: adcq
-; AVX2: addq
-; AVX2: adcq
-; AVX2: addq
-; AVX2: adcq
-; AVX2: ret
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movq %rdx, %rax
+; AVX2-NEXT:    addq %r8, %rdi
+; AVX2-NEXT:    adcq %r9, %rsi
+; AVX2-NEXT:    addq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
+; AVX2-NEXT:    addq %rdi, %rax
+; AVX2-NEXT:    adcq %rsi, %rcx
+; AVX2-NEXT:    movq %rcx, %rdx
+; AVX2-NEXT:    retq
   %add = add nsw <2 x i128> %a, %b
   %rdx.shuf = shufflevector <2 x i128> %add, <2 x i128> undef, <2 x i32> <i32 undef, i32 0>
   %bin.rdx = add <2 x i128> %add, %rdx.shuf

Modified: llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll Tue Aug 20 16:11:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse4.1 | FileCheck %s --check-prefix=NO_SSE_41
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE_41
 

Modified: llvm/trunk/test/CodeGen/X86/vec_udiv_to_shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_udiv_to_shift.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_udiv_to_shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_udiv_to_shift.ll Tue Aug 20 16:11:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
 define <8 x i16> @udiv_vec8x16(<8 x i16> %var) {

Modified: llvm/trunk/test/CodeGen/X86/vec_zero-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_zero-2.ll?rev=369469&r1=369468&r2=369469&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_zero-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_zero-2.ll Tue Aug 20 16:11:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-- -mattr=+sse2
 
 define i32 @t() {
