[llvm] r270422 - [X86][AVX] Regenerated avx upgraded intrinsics tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon May 23 05:39:07 PDT 2016


Author: rksimon
Date: Mon May 23 07:39:06 2016
New Revision: 270422

URL: http://llvm.org/viewvc/llvm-project?rev=270422&view=rev
Log:
[X86][AVX] Regenerated avx upgraded intrinsics tests

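The new RUN line sets a full triple (i686-apple-darwin) and enables AVX through -mattr, rather than the old -march=x86 / -mcpu=corei7-avx pair, and every CHECK block is rebuilt verbatim from llc output by utils/update_test_checks.py. That is where the "## BB#0:" block labels (Darwin x86 uses "##" as its assembly comment string) and the explicit 32-bit "retl" returns come from.

For background, these tests cover legacy intrinsics that the IR auto-upgrader rewrites into generic vector shuffles before codegen. As a rough sketch only -- the authoritative logic lives in lib/IR/AutoUpgrade.cpp and the exact form it emits may differ; the function name below is made up for illustration -- a vinsertf128 call with immediate 1 is conceptually replaced like so:

; Hedged sketch, not from this commit: upgraded form of
; @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %a0, <2 x double> %a1, i8 1).
define <4 x double> @vinsertf128_pd_sketch(<4 x double> %a0, <2 x double> %a1) {
  ; Widen the 128-bit operand to 256 bits; the upper elements are placeholders.
  %wide = shufflevector <2 x double> %a1, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  ; Keep the low lane of %a0 and splice %a1 in as the high lane.
  %res = shufflevector <4 x double> %a0, <4 x double> %wide, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x double> %res
}

Instruction selection matches this shuffle pattern back to a single vinsertf128 $1, which is exactly what the regenerated CHECK-NEXT lines below pin down.
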
Modified:
    llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll

Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll?rev=270422&r1=270421&r2=270422&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll Mon May 23 07:39:06 2016
@@ -1,26 +1,33 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7-avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=avx | FileCheck %s
 
-; We don't check any vinsertf128 variant with immediate 0 because that's just a blend. 
+; We don't check any vinsertf128 variant with immediate 0 because that's just a blend.
 
 define <4 x double> @test_x86_avx_vinsertf128_pd_256_1(<4 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL:       test_x86_avx_vinsertf128_pd_256_1: 
-; CHECK:             vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-LABEL: test_x86_avx_vinsertf128_pd_256_1:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT:    retl
   %res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %a0, <2 x double> %a1, i8 1)
   ret <4 x double> %res
 }
 declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind readnone
 
 define <8 x float> @test_x86_avx_vinsertf128_ps_256_1(<8 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL:      test_x86_avx_vinsertf128_ps_256_1: 
-; CHECK:            vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-LABEL: test_x86_avx_vinsertf128_ps_256_1:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT:    retl
   %res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %a0, <4 x float> %a1, i8 1)
   ret <8 x float> %res
 }
 declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i8) nounwind readnone
 
 define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL:    test_x86_avx_vinsertf128_si_256_1: 
-; CHECK:          vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_1:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT:    retl
   %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 1)
   ret <8 x i32> %res
 }
@@ -29,34 +36,45 @@ define <8 x i32> @test_x86_avx_vinsertf1
 ; of a vinsertf128 $0 which should be optimized into a blend, so just check that it's
 ; not a vinsertf128 $1.
 define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) {
-; CHECK-LABEL:    test_x86_avx_vinsertf128_si_256_2: 
-; CHECK-NOT:      vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; CHECK-NEXT:    retl
   %res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2)
   ret <8 x i32> %res
 }
 declare <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32>, <4 x i32>, i8) nounwind readnone
 
-; We don't check any vextractf128 variant with immediate 0 because that's just a move. 
+; We don't check any vextractf128 variant with immediate 0 because that's just a move.
 
 define <2 x double> @test_x86_avx_vextractf128_pd_256_1(<4 x double> %a0) {
-; CHECK-LABEL:       test_x86_avx_vextractf128_pd_256_1: 
-; CHECK:             vextractf128 $1, %ymm0, %xmm0
+; CHECK-LABEL: test_x86_avx_vextractf128_pd_256_1:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 1)
   ret <2 x double> %res
 }
 declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
 
 define <4 x float> @test_x86_avx_vextractf128_ps_256_1(<8 x float> %a0) {
-; CHECK-LABEL:       test_x86_avx_vextractf128_ps_256_1: 
-; CHECK:             vextractf128 $1, %ymm0, %xmm0
+; CHECK-LABEL: test_x86_avx_vextractf128_ps_256_1:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a0, i8 1)
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
 
 define <4 x i32> @test_x86_avx_vextractf128_si_256_1(<8 x i32> %a0) {
-; CHECK-LABEL:    test_x86_avx_vextractf128_si_256_1: 
-; CHECK:          vextractf128 $1, %ymm0, %xmm0
+; CHECK-LABEL: test_x86_avx_vextractf128_si_256_1:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retl
   %res = call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %a0, i8 1)
   ret <4 x i32> %res
 }
@@ -66,16 +84,20 @@ declare <4 x i32> @llvm.x86.avx.vextract
 ; of a vextractf128 $0 which should be optimized away, so just check that it's
 ; not a vextractf128 of any kind.
 define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
-; CHECK-LABEL:       test_x86_avx_extractf128_pd_256_2: 
-; CHECK-NOT:         vextractf128
+; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2)
   ret <2 x double> %res
 }
 
 
 define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
-; CHECK-LABEL:       test_x86_avx_blend_pd_256: 
-; CHECK:             vblendpd
+; CHECK-LABEL: test_x86_avx_blend_pd_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
+; CHECK-NEXT:    retl
   %res = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 7) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
@@ -83,8 +105,10 @@ declare <4 x double> @llvm.x86.avx.blend
 
 
 define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; CHECK-LABEL:      test_x86_avx_blend_ps_256: 
-; CHECK:            vblendps
+; CHECK-LABEL: test_x86_avx_blend_ps_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
+; CHECK-NEXT:    retl
   %res = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
@@ -92,8 +116,10 @@ declare <8 x float> @llvm.x86.avx.blend.
 
 
 define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) {
-; CHECK-LABEL:      test_x86_avx_dp_ps_256: 
-; CHECK:            vdpps
+; CHECK-LABEL: test_x86_avx_dp_ps_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vdpps $7, %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retl
   %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i32 7) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
@@ -101,8 +127,10 @@ declare <8 x float> @llvm.x86.avx.dp.ps.
 
 
 define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
-; CHECK-LABEL:    test_x86_sse2_psll_dq: 
-; CHECK:          vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; CHECK-LABEL: test_x86_sse2_psll_dq:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -110,8 +138,10 @@ declare <2 x i64> @llvm.x86.sse2.psll.dq
 
 
 define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
-; CHECK-LABEL:    test_x86_sse2_psrl_dq: 
-; CHECK:          vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; CHECK-LABEL: test_x86_sse2_psrl_dq:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
@@ -119,8 +149,10 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq
 
 
 define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
-; CHECK-LABEL:       test_x86_sse41_blendpd: 
-; CHECK:             vblendpd
+; CHECK-LABEL: test_x86_sse41_blendpd:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; CHECK-NEXT:    retl
   %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i8 2) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
@@ -128,8 +160,10 @@ declare <2 x double> @llvm.x86.sse41.ble
 
 
 define <4 x float> @test_x86_sse41_blendps(<4 x float> %a0, <4 x float> %a1) {
-; CHECK-LABEL:      test_x86_sse41_blendps: 
-; CHECK:            vblendps
+; CHECK-LABEL: test_x86_sse41_blendps:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; CHECK-NEXT:    retl
   %res = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -137,8 +171,10 @@ declare <4 x float> @llvm.x86.sse41.blen
 
 
 define <8 x i16> @test_x86_sse41_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
-; CHECK-LABEL:    test_x86_sse41_pblendw: 
-; CHECK:          vpblendw
+; CHECK-LABEL: test_x86_sse41_pblendw:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6,7]
+; CHECK-NEXT:    retl
   %res = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i8 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -147,7 +183,7 @@ declare <8 x i16> @llvm.x86.sse41.pblend
 
 define <4 x i32> @test_x86_sse41_pmovsxbd(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxbd:
-; CHECK:       # BB#0:
+; CHECK:       ## BB#0:
 ; CHECK-NEXT:    vpmovsxbd %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %a0) ; <<4 x i32>> [#uses=1]
@@ -158,7 +194,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovsx
 
 define <2 x i64> @test_x86_sse41_pmovsxbq(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxbq:
-; CHECK:       # BB#0:
+; CHECK:       ## BB#0:
 ; CHECK-NEXT:    vpmovsxbq %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %a0) ; <<2 x i64>> [#uses=1]
@@ -169,7 +205,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 define <8 x i16> @test_x86_sse41_pmovsxbw(<16 x i8> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxbw:
-; CHECK:       # BB#0:
+; CHECK:       ## BB#0:
 ; CHECK-NEXT:    vpmovsxbw %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %a0) ; <<8 x i16>> [#uses=1]
@@ -180,7 +216,7 @@ declare <8 x i16> @llvm.x86.sse41.pmovsx
 
 define <2 x i64> @test_x86_sse41_pmovsxdq(<4 x i32> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxdq:
-; CHECK:       # BB#0:
+; CHECK:       ## BB#0:
 ; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %a0) ; <<2 x i64>> [#uses=1]
@@ -191,7 +227,7 @@ declare <2 x i64> @llvm.x86.sse41.pmovsx
 
 define <4 x i32> @test_x86_sse41_pmovsxwd(<8 x i16> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxwd:
-; CHECK:       # BB#0:
+; CHECK:       ## BB#0:
 ; CHECK-NEXT:    vpmovsxwd %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %a0) ; <<4 x i32>> [#uses=1]
@@ -202,7 +238,7 @@ declare <4 x i32> @llvm.x86.sse41.pmovsx
 
 define <2 x i64> @test_x86_sse41_pmovsxwq(<8 x i16> %a0) {
 ; CHECK-LABEL: test_x86_sse41_pmovsxwq:
-; CHECK:       # BB#0:
+; CHECK:       ## BB#0:
 ; CHECK-NEXT:    vpmovsxwq %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %res = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %a0) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
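
A note on the immediate-2 cases above (background, not part of the commit): vinsertf128 and vextractf128 only honor the low bit of their lane-select immediate, so immediate 2 behaves like immediate 0. That is why test_x86_avx_vinsertf128_si_256_2 now expects a vblendpd and test_x86_avx_extractf128_pd_256_2 expects no extract at all. In the same hedged-sketch spirit as above (function name invented for illustration), the extract with immediate 1 upgrades to a single shuffle of the upper lane:

; Hedged sketch, not from this commit: upgraded form of
; @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 1).
define <2 x double> @vextractf128_pd_sketch(<4 x double> %a0) {
  ; Select elements 2 and 3, i.e. the upper 128-bit lane of the ymm input.
  %res = shufflevector <4 x double> %a0, <4 x double> undef, <2 x i32> <i32 2, i32 3>
  ret <2 x double> %res
}

The trailing vzeroupper in the regenerated extract checks is inserted by the backend before returning from functions that dirtied the upper ymm state, avoiding AVX/SSE transition penalties.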