[llvm] r218854 - [x86] Switch some of the new consolidated vector tests to use
Chandler Carruth
chandlerc at gmail.com
Wed Oct 1 23:52:19 PDT 2014
Author: chandlerc
Date: Thu Oct 2 01:52:19 2014
New Revision: 218854
URL: http://llvm.org/viewvc/llvm-project?rev=218854&view=rev
Log:
[x86] Switch some of the new consolidated vector tests to use
a bare-metal triple and have nice BB labels, etc.
No significant change here; just tidying up to have a consistent set of
OS-agnostic vector tests.
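As background (a minimal sketch, not part of this patch): Darwin's x86 assembly
uses "##" as its comment string while a bare-metal/ELF triple uses "#", and the
basic-block comment only carries a block name when the IR block is explicitly
named, which appears to be why the expected lines change from "## BB#0:" to
"# BB#0: # %entry" throughout the diff. A hypothetical test written in the same
style (the function name and CHECK lines below are illustrative, not generated
output) would look like:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s
define <4 x float> @example_add(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: example_add:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:  addps %xmm1, %xmm0
; CHECK-NEXT:  retq
; The block is named "entry" so the emitted BB comment shows "%entry".
entry:
  %sum = fadd <4 x float> %a, %b
  ret <4 x float> %sum
}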
Modified:
llvm/trunk/test/CodeGen/X86/vector-blend.ll
llvm/trunk/test/CodeGen/X86/vector-sext.ll
llvm/trunk/test/CodeGen/X86/vector-zext.ll
Modified: llvm/trunk/test/CodeGen/X86/vector-blend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-blend.ll?rev=218854&r1=218853&r2=218854&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-blend.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-blend.ll Thu Oct 2 01:52:19 2014
@@ -1,228 +1,236 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; AVX128 tests:
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
; SSE2-LABEL: vsel_float:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: andps {{.*}}, %xmm1
; SSE2-NEXT: andps {{.*}}, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_float:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: andps {{.*}}, %xmm1
; SSSE3-NEXT: andps {{.*}}, %xmm0
; SSSE3-NEXT: orps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_float:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_float:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
+entry:
%vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x float> %v1, <4 x float> %v2
ret <4 x float> %vsel
}
define <4 x float> @vsel_float2(<4 x float> %v1, <4 x float> %v2) {
; SSE-LABEL: vsel_float2:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movss %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: vsel_float2:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
+entry:
%vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
ret <4 x float> %vsel
}
define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
; SSE2-LABEL: vsel_4xi8:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: andps {{.*}}, %xmm1
; SSE2-NEXT: andps {{.*}}, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_4xi8:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: andps {{.*}}, %xmm1
; SSSE3-NEXT: andps {{.*}}, %xmm0
; SSSE3-NEXT: orps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_4xi8:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_4xi8:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_4xi8:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX2-NEXT: retq
+entry:
%vsel = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i8> %v1, <4 x i8> %v2
ret <4 x i8> %vsel
}
define <4 x i16> @vsel_4xi16(<4 x i16> %v1, <4 x i16> %v2) {
; SSE2-LABEL: vsel_4xi16:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: andps {{.*}}, %xmm1
; SSE2-NEXT: andps {{.*}}, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_4xi16:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: andps {{.*}}, %xmm1
; SSSE3-NEXT: andps {{.*}}, %xmm0
; SSSE3-NEXT: orps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_4xi16:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_4xi16:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_4xi16:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-NEXT: retq
+entry:
%vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i16> %v1, <4 x i16> %v2
ret <4 x i16> %vsel
}
define <4 x i32> @vsel_i32(<4 x i32> %v1, <4 x i32> %v2) {
; SSE2-LABEL: vsel_i32:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: andps {{.*}}, %xmm1
; SSE2-NEXT: andps {{.*}}, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i32:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: andps {{.*}}, %xmm1
; SSSE3-NEXT: andps {{.*}}, %xmm0
; SSSE3-NEXT: orps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i32:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: vsel_i32:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_i32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
+entry:
%vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> %v1, <4 x i32> %v2
ret <4 x i32> %vsel
}
define <2 x double> @vsel_double(<2 x double> %v1, <2 x double> %v2) {
; SSE-LABEL: vsel_double:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movsd %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: vsel_double:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vmovsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
+entry:
%vsel = select <2 x i1> <i1 true, i1 false>, <2 x double> %v1, <2 x double> %v2
ret <2 x double> %vsel
}
define <2 x i64> @vsel_i64(<2 x i64> %v1, <2 x i64> %v2) {
; SSE-LABEL: vsel_i64:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movsd %xmm0, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: vsel_i64:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vmovsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
+entry:
%vsel = select <2 x i1> <i1 true, i1 false>, <2 x i64> %v1, <2 x i64> %v2
ret <2 x i64> %vsel
}
define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
; SSE2-LABEL: vsel_8xi16:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: andps {{.*}}, %xmm1
; SSE2-NEXT: andps {{.*}}, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_8xi16:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: andps {{.*}}, %xmm1
; SSSE3-NEXT: andps {{.*}}, %xmm0
; SSSE3-NEXT: orps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_8xi16:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_8xi16:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-NEXT: retq
+entry:
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i16> %v1, <8 x i16> %v2
ret <8 x i16> %vsel
}
define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
; SSE2-LABEL: vsel_i8:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: andps {{.*}}, %xmm1
; SSE2-NEXT: andps {{.*}}, %xmm0
; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i8:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: andps {{.*}}, %xmm1
; SSSE3-NEXT: andps {{.*}}, %xmm0
; SSSE3-NEXT: orps %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i8:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE41-NEXT: pblendvb %xmm2, %xmm1
@@ -230,10 +238,11 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1,
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_i8:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
+entry:
%vsel = select <16 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <16 x i8> %v1, <16 x i8> %v2
ret <16 x i8> %vsel
}
@@ -243,7 +252,7 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1,
define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
; SSE-LABEL: vsel_float8:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movss %xmm0, %xmm2
; SSE-NEXT: movss %xmm1, %xmm3
; SSE-NEXT: movaps %xmm2, %xmm0
@@ -251,16 +260,17 @@ define <8 x float> @vsel_float8(<8 x flo
; SSE-NEXT: retq
;
; AVX-LABEL: vsel_float8:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX-NEXT: retq
+entry:
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x float> %v1, <8 x float> %v2
ret <8 x float> %vsel
}
define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
; SSE-LABEL: vsel_i328:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movss %xmm0, %xmm2
; SSE-NEXT: movss %xmm1, %xmm3
; SSE-NEXT: movaps %xmm2, %xmm0
@@ -268,21 +278,22 @@ define <8 x i32> @vsel_i328(<8 x i32> %v
; SSE-NEXT: retq
;
; AVX1-LABEL: vsel_i328:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: vsel_i328:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
; AVX2-NEXT: retq
+entry:
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i32> %v1, <8 x i32> %v2
ret <8 x i32> %vsel
}
define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) {
; SSE2-LABEL: vsel_double8:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movsd %xmm0, %xmm4
; SSE2-NEXT: movsd %xmm2, %xmm6
; SSE2-NEXT: movaps %xmm4, %xmm0
@@ -292,7 +303,7 @@ define <8 x double> @vsel_double8(<8 x d
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_double8:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movsd %xmm0, %xmm4
; SSSE3-NEXT: movsd %xmm2, %xmm6
; SSSE3-NEXT: movaps %xmm4, %xmm0
@@ -302,7 +313,7 @@ define <8 x double> @vsel_double8(<8 x d
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_double8:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm4[1]
; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm5[0,1]
; SSE41-NEXT: blendpd {{.*#+}} xmm2 = xmm2[0],xmm6[1]
@@ -310,17 +321,18 @@ define <8 x double> @vsel_double8(<8 x d
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_double8:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3]
; AVX-NEXT: retq
+entry:
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x double> %v1, <8 x double> %v2
ret <8 x double> %vsel
}
define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
; SSE2-LABEL: vsel_i648:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movsd %xmm0, %xmm4
; SSE2-NEXT: movsd %xmm2, %xmm6
; SSE2-NEXT: movaps %xmm4, %xmm0
@@ -330,7 +342,7 @@ define <8 x i64> @vsel_i648(<8 x i64> %v
; SSE2-NEXT: retq
;
; SSSE3-LABEL: vsel_i648:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movsd %xmm0, %xmm4
; SSSE3-NEXT: movsd %xmm2, %xmm6
; SSSE3-NEXT: movaps %xmm4, %xmm0
@@ -340,7 +352,7 @@ define <8 x i64> @vsel_i648(<8 x i64> %v
; SSSE3-NEXT: retq
;
; SSE41-LABEL: vsel_i648:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm4[1]
; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm5[0,1]
; SSE41-NEXT: blendpd {{.*#+}} xmm2 = xmm2[0],xmm6[1]
@@ -348,17 +360,18 @@ define <8 x i64> @vsel_i648(<8 x i64> %v
; SSE41-NEXT: retq
;
; AVX-LABEL: vsel_i648:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3]
; AVX-NEXT: retq
+entry:
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i64> %v1, <8 x i64> %v2
ret <8 x i64> %vsel
}
define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
; SSE-LABEL: vsel_double4:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movsd %xmm0, %xmm2
; SSE-NEXT: movsd %xmm1, %xmm3
; SSE-NEXT: movaps %xmm2, %xmm0
@@ -366,16 +379,17 @@ define <4 x double> @vsel_double4(<4 x d
; SSE-NEXT: retq
;
; AVX-LABEL: vsel_double4:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
; AVX-NEXT: retq
+entry:
%vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %v1, <4 x double> %v2
ret <4 x double> %vsel
}
define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: testa:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: cmplepd %xmm0, %xmm2
; SSE2-NEXT: andpd %xmm2, %xmm0
@@ -384,7 +398,7 @@ define <2 x double> @testa(<2 x double>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: testa:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movapd %xmm1, %xmm2
; SSSE3-NEXT: cmplepd %xmm0, %xmm2
; SSSE3-NEXT: andpd %xmm2, %xmm0
@@ -393,7 +407,7 @@ define <2 x double> @testa(<2 x double>
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testa:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmplepd %xmm2, %xmm0
@@ -402,10 +416,11 @@ define <2 x double> @testa(<2 x double>
; SSE41-NEXT: retq
;
; AVX-LABEL: testa:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
+entry:
%max_is_x = fcmp oge <2 x double> %x, %y
%max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %max
@@ -413,7 +428,7 @@ define <2 x double> @testa(<2 x double>
define <2 x double> @testb(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: testb:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: cmpnlepd %xmm0, %xmm2
; SSE2-NEXT: andpd %xmm2, %xmm0
@@ -422,7 +437,7 @@ define <2 x double> @testb(<2 x double>
; SSE2-NEXT: retq
;
; SSSE3-LABEL: testb:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movapd %xmm1, %xmm2
; SSSE3-NEXT: cmpnlepd %xmm0, %xmm2
; SSSE3-NEXT: andpd %xmm2, %xmm0
@@ -431,7 +446,7 @@ define <2 x double> @testb(<2 x double>
; SSSE3-NEXT: retq
;
; SSE41-LABEL: testb:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: cmpnlepd %xmm2, %xmm0
@@ -440,10 +455,11 @@ define <2 x double> @testb(<2 x double>
; SSE41-NEXT: retq
;
; AVX-LABEL: testb:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vcmpnlepd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
+entry:
%min_is_x = fcmp ult <2 x double> %x, %y
%min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
ret <2 x double> %min
@@ -453,23 +469,24 @@ define <2 x double> @testb(<2 x double>
; blend instruction with an immediate mask
define <4 x double> @constant_blendvpd_avx(<4 x double> %xy, <4 x double> %ab) {
; SSE-LABEL: constant_blendvpd_avx:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movsd %xmm1, %xmm3
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: constant_blendvpd_avx:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX-NEXT: retq
- %1 = select <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> %xy, <4 x double> %ab
- ret <4 x double> %1
+entry:
+ %select = select <4 x i1> <i1 false, i1 false, i1 true, i1 false>, <4 x double> %xy, <4 x double> %ab
+ ret <4 x double> %select
}
define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd) {
; SSE2-LABEL: constant_blendvps_avx:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movaps {{.*#+}} xmm4 = [4294967295,4294967295,4294967295,0]
; SSE2-NEXT: andps %xmm4, %xmm2
; SSE2-NEXT: movaps {{.*#+}} xmm5 = [0,0,0,4294967295]
@@ -481,7 +498,7 @@ define <8 x float> @constant_blendvps_av
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_blendvps_avx:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movaps {{.*#+}} xmm4 = [4294967295,4294967295,4294967295,0]
; SSSE3-NEXT: andps %xmm4, %xmm2
; SSSE3-NEXT: movaps {{.*#+}} xmm5 = [0,0,0,4294967295]
@@ -493,22 +510,23 @@ define <8 x float> @constant_blendvps_av
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_blendvps_avx:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_blendvps_avx:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
; AVX-NEXT: retq
- %1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x float> %xyzw, <8 x float> %abcd
- ret <8 x float> %1
+entry:
+ %select = select <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x float> %xyzw, <8 x float> %abcd
+ ret <8 x float> %select
}
define <32 x i8> @constant_pblendvb_avx2(<32 x i8> %xyzw, <32 x i8> %abcd) {
; SSE2-LABEL: constant_pblendvb_avx2:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movaps {{.*#+}} xmm4 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255]
; SSE2-NEXT: andps %xmm4, %xmm2
; SSE2-NEXT: movaps {{.*#+}} xmm5 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
@@ -520,7 +538,7 @@ define <32 x i8> @constant_pblendvb_avx2
; SSE2-NEXT: retq
;
; SSSE3-LABEL: constant_pblendvb_avx2:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movaps {{.*#+}} xmm4 = [255,255,0,255,0,0,0,255,255,255,0,255,0,0,0,255]
; SSSE3-NEXT: andps %xmm4, %xmm2
; SSSE3-NEXT: movaps {{.*#+}} xmm5 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
@@ -532,7 +550,7 @@ define <32 x i8> @constant_pblendvb_avx2
; SSSE3-NEXT: retq
;
; SSE41-LABEL: constant_pblendvb_avx2:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
; SSE41-NEXT: pblendvb %xmm4, %xmm2
@@ -542,19 +560,20 @@ define <32 x i8> @constant_pblendvb_avx2
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_pblendvb_avx2:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vandps {{.*}}, %ymm1, %ymm1
; AVX1-NEXT: vandps {{.*}}, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_pblendvb_avx2:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0,0,0,255,0,255,255,255,0]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
- %1 = select <32 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <32 x i8> %xyzw, <32 x i8> %abcd
- ret <32 x i8> %1
+entry:
+ %select = select <32 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <32 x i8> %xyzw, <32 x i8> %abcd
+ ret <32 x i8> %select
}
declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>)
@@ -563,33 +582,34 @@ declare <4 x double> @llvm.x86.avx.blend
;; 4 tests for shufflevectors that optimize to blend + immediate
define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
; SSE2-LABEL: blend_shufflevector_4xfloat:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_4xfloat:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[1,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_4xfloat:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_4xfloat:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT: retq
- %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
- ret <4 x float> %1
+entry:
+ %select = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %select
}
define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b) {
; SSE2-LABEL: blend_shufflevector_8xfloat:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movss %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
@@ -598,7 +618,7 @@ define <8 x float> @blend_shufflevector_
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_shufflevector_8xfloat:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movss %xmm0, %xmm2
; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[3,0]
; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[0,2]
@@ -607,7 +627,7 @@ define <8 x float> @blend_shufflevector_
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_shufflevector_8xfloat:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: blendps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
; SSE41-NEXT: movss %xmm0, %xmm2
; SSE41-NEXT: movaps %xmm2, %xmm0
@@ -615,39 +635,42 @@ define <8 x float> @blend_shufflevector_
; SSE41-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_8xfloat:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5],ymm0[6],ymm1[7]
; AVX-NEXT: retq
- %1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 6, i32 15>
- ret <8 x float> %1
+entry:
+ %select = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 6, i32 15>
+ ret <8 x float> %select
}
define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double> %b) {
; SSE-LABEL: blend_shufflevector_4xdouble:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movsd %xmm0, %xmm2
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_4xdouble:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
; AVX-NEXT: retq
- %1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
- ret <4 x double> %1
+entry:
+ %select = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
+ ret <4 x double> %select
}
define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
; SSE-LABEL: blend_shufflevector_4xi64:
-; SSE: ## BB#0:
+; SSE: # BB#0: # %entry
; SSE-NEXT: movsd %xmm2, %xmm0
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: blend_shufflevector_4xi64:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3]
; AVX-NEXT: retq
- %1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
- ret <4 x i64> %1
+entry:
+ %select = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
+ ret <4 x i64> %select
}
Modified: llvm/trunk/test/CodeGen/X86/vector-sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-sext.ll?rev=218854&r1=218853&r2=218854&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-sext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-sext.ll Thu Oct 2 01:52:19 2014
@@ -1,17 +1,17 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
;
; Just one 32-bit run to make sure we do reasonable things there.
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=i686 -mattr=+sse4.1 | FileCheck %s --check-prefix=X32-SSE41
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=i686 -mattr=+sse4.1 | FileCheck %s --check-prefix=X32-SSE41
define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_8i32:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: ## kill: XMM0<def> XMM1<kill>
+; SSE2-NEXT: # kill: XMM0<def> XMM1<kill>
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
@@ -21,9 +21,9 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_8i16_to_8i32:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: ## kill: XMM0<def> XMM1<kill>
+; SSSE3-NEXT: # kill: XMM0<def> XMM1<kill>
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $16, %xmm0
; SSSE3-NEXT: psrad $16, %xmm0
@@ -33,7 +33,7 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_8i16_to_8i32:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovzxwd %xmm1, %xmm0
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -44,7 +44,7 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_8i16_to_8i32:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
@@ -52,12 +52,12 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_8i16_to_8i32:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: sext_8i16_to_8i32:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movdqa %xmm0, %xmm1
; X32-SSE41-NEXT: pmovzxwd %xmm1, %xmm0
; X32-SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -66,14 +66,14 @@ define <8 x i32> @sext_8i16_to_8i32(<8 x
; X32-SSE41-NEXT: pslld $16, %xmm0
; X32-SSE41-NEXT: psrad $16, %xmm0
; X32-SSE41-NEXT: retl
-
+entry:
%B = sext <8 x i16> %A to <8 x i32>
ret <8 x i32>%B
}
define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_4i32_to_4i64:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSE2-NEXT: movd %xmm1, %rax
; SSE2-NEXT: cltq
@@ -96,7 +96,7 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i32_to_4i64:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
; SSSE3-NEXT: movd %xmm1, %rax
; SSSE3-NEXT: cltq
@@ -119,7 +119,7 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i32_to_4i64:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovzxdq %xmm0, %xmm1
; SSE41-NEXT: pextrq $1, %xmm1, %rax
; SSE41-NEXT: cltq
@@ -140,7 +140,7 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i32_to_4i64:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
@@ -148,12 +148,12 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_4i32_to_4i64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i32_to_4i64:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: pmovzxdq %xmm0, %xmm2
; X32-SSE41-NEXT: movd %xmm2, %eax
; X32-SSE41-NEXT: sarl $31, %eax
@@ -170,42 +170,42 @@ define <4 x i64> @sext_4i32_to_4i64(<4 x
; X32-SSE41-NEXT: pinsrd $3, %ecx, %xmm1
; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
; X32-SSE41-NEXT: retl
-
+entry:
%B = sext <4 x i32> %A to <4 x i64>
ret <4 x i64>%B
}
define <4 x i32> @load_sext_test1(<4 x i16> *%ptr) {
; SSE2-LABEL: load_sext_test1:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movq (%rdi), %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test1:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movq (%rdi), %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test1:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test1:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_test1:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwd (%eax), %xmm0
; X32-SSE41-NEXT: retl
-
+entry:
%X = load <4 x i16>* %ptr
%Y = sext <4 x i16> %X to <4 x i32>
ret <4 x i32>%Y
@@ -213,7 +213,7 @@ define <4 x i32> @load_sext_test1(<4 x i
define <4 x i32> @load_sext_test2(<4 x i8> *%ptr) {
; SSE2-LABEL: load_sext_test2:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movl (%rdi), %eax
; SSE2-NEXT: movl %eax, %ecx
; SSE2-NEXT: shll $8, %ecx
@@ -229,27 +229,28 @@ define <4 x i32> @load_sext_test2(<4 x i
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test2:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movd (%rdi), %xmm0
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3]
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test2:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test2:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_test2:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbd (%eax), %xmm0
; X32-SSE41-NEXT: retl
+entry:
%X = load <4 x i8>* %ptr
%Y = sext <4 x i8> %X to <4 x i32>
ret <4 x i32>%Y
@@ -257,7 +258,7 @@ define <4 x i32> @load_sext_test2(<4 x i
define <2 x i64> @load_sext_test3(<2 x i8> *%ptr) {
; SSE2-LABEL: load_sext_test3:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movsbq 1(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movsbq (%rdi), %rax
@@ -266,7 +267,7 @@ define <2 x i64> @load_sext_test3(<2 x i
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test3:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movsbq 1(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: movsbq (%rdi), %rax
@@ -275,20 +276,21 @@ define <2 x i64> @load_sext_test3(<2 x i
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test3:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test3:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxbq (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_test3:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
; X32-SSE41-NEXT: retl
+entry:
%X = load <2 x i8>* %ptr
%Y = sext <2 x i8> %X to <2 x i64>
ret <2 x i64>%Y
@@ -296,7 +298,7 @@ define <2 x i64> @load_sext_test3(<2 x i
define <2 x i64> @load_sext_test4(<2 x i16> *%ptr) {
; SSE2-LABEL: load_sext_test4:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movswq 2(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movswq (%rdi), %rax
@@ -305,7 +307,7 @@ define <2 x i64> @load_sext_test4(<2 x i
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test4:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movswq 2(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: movswq (%rdi), %rax
@@ -314,20 +316,21 @@ define <2 x i64> @load_sext_test4(<2 x i
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test4:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test4:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxwq (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_test4:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
; X32-SSE41-NEXT: retl
+entry:
%X = load <2 x i16>* %ptr
%Y = sext <2 x i16> %X to <2 x i64>
ret <2 x i64>%Y
@@ -335,7 +338,7 @@ define <2 x i64> @load_sext_test4(<2 x i
define <2 x i64> @load_sext_test5(<2 x i32> *%ptr) {
; SSE2-LABEL: load_sext_test5:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movslq 4(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movslq (%rdi), %rax
@@ -344,7 +347,7 @@ define <2 x i64> @load_sext_test5(<2 x i
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test5:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movslq 4(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: movslq (%rdi), %rax
@@ -353,20 +356,21 @@ define <2 x i64> @load_sext_test5(<2 x i
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test5:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test5:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxdq (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_test5:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxdq (%eax), %xmm0
; X32-SSE41-NEXT: retl
+entry:
%X = load <2 x i32>* %ptr
%Y = sext <2 x i32> %X to <2 x i64>
ret <2 x i64>%Y
@@ -374,34 +378,35 @@ define <2 x i64> @load_sext_test5(<2 x i
define <8 x i16> @load_sext_test6(<8 x i8> *%ptr) {
; SSE2-LABEL: load_sext_test6:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movq (%rdi), %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_test6:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movq (%rdi), %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_test6:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_test6:
-; AVX: ## BB#0:
+; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_test6:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
; X32-SSE41-NEXT: retl
+entry:
%X = load <8 x i8>* %ptr
%Y = sext <8 x i8> %X to <8 x i16>
ret <8 x i16>%Y
@@ -409,7 +414,7 @@ define <8 x i16> @load_sext_test6(<8 x i
define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
; SSE2-LABEL: sext_4i1_to_4i64:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
@@ -434,7 +439,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i1_to_4i64:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
@@ -459,7 +464,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i1_to_4i64:
-; SSE41: ## BB#0:
+; SSE41: # BB#0:
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pmovzxdq %xmm0, %xmm1
@@ -482,7 +487,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i1_to_4i64:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
@@ -492,14 +497,14 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_4i1_to_4i64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i1_to_4i64:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0:
; X32-SSE41-NEXT: pslld $31, %xmm0
; X32-SSE41-NEXT: psrad $31, %xmm0
; X32-SSE41-NEXT: pmovzxdq %xmm0, %xmm2
@@ -524,7 +529,7 @@ define <4 x i64> @sext_4i1_to_4i64(<4 x
define <16 x i16> @sext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-LABEL: sext_16i8_to_16i16:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -536,7 +541,7 @@ define <16 x i16> @sext_16i8_to_16i16(<1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_16i8_to_16i16:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -548,7 +553,7 @@ define <16 x i16> @sext_16i8_to_16i16(<1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_16i8_to_16i16:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: pmovzxbw %xmm1, %xmm0
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -559,7 +564,7 @@ define <16 x i16> @sext_16i8_to_16i16(<1
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_16i8_to_16i16:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
; AVX1-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -568,13 +573,13 @@ define <16 x i16> @sext_16i8_to_16i16(<1
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_16i8_to_16i16:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: sext_16i8_to_16i16:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movdqa (%eax), %xmm1
; X32-SSE41-NEXT: pmovzxbw %xmm1, %xmm0
@@ -584,6 +589,7 @@ define <16 x i16> @sext_16i8_to_16i16(<1
; X32-SSE41-NEXT: psllw $8, %xmm0
; X32-SSE41-NEXT: psraw $8, %xmm0
; X32-SSE41-NEXT: retl
+entry:
%X = load <16 x i8>* %ptr
%Y = sext <16 x i8> %X to <16 x i16>
ret <16 x i16> %Y
@@ -591,7 +597,7 @@ define <16 x i16> @sext_16i8_to_16i16(<1
define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
; SSE2-LABEL: sext_4i8_to_4i64:
-; SSE2: ## BB#0:
+; SSE2: # BB#0:
; SSE2-NEXT: pslld $24, %xmm0
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
@@ -616,7 +622,7 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i8_to_4i64:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0:
; SSSE3-NEXT: pslld $24, %xmm0
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,0]
@@ -641,7 +647,7 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i8_to_4i64:
-; SSE41: ## BB#0:
+; SSE41: # BB#0:
; SSE41-NEXT: pslld $24, %xmm0
; SSE41-NEXT: psrad $24, %xmm0
; SSE41-NEXT: pmovzxdq %xmm0, %xmm1
@@ -664,7 +670,7 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i8_to_4i64:
-; AVX1: ## BB#0:
+; AVX1: # BB#0:
; AVX1-NEXT: vpslld $24, %xmm0, %xmm0
; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
@@ -674,14 +680,14 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_4i8_to_4i64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0:
; AVX2-NEXT: vpslld $24, %xmm0, %xmm0
; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i8_to_4i64:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0:
; X32-SSE41-NEXT: pslld $24, %xmm0
; X32-SSE41-NEXT: psrad $24, %xmm0
; X32-SSE41-NEXT: pmovzxdq %xmm0, %xmm2
@@ -706,7 +712,7 @@ define <4 x i64> @sext_4i8_to_4i64(<4 x
define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-LABEL: load_sext_4i8_to_4i64:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: pextrw $1, %xmm1, %ecx
@@ -737,7 +743,7 @@ define <4 x i64> @load_sext_4i8_to_4i64(
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i8_to_4i64:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movd (%rdi), %xmm1
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,0]
@@ -761,7 +767,7 @@ define <4 x i64> @load_sext_4i8_to_4i64(
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i8_to_4i64:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: movd (%rdi), %xmm0
; SSE41-NEXT: pmovzxbd %xmm0, %xmm1
; SSE41-NEXT: pmovzxbq %xmm0, %xmm0
@@ -783,7 +789,7 @@ define <4 x i64> @load_sext_4i8_to_4i64(
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i8_to_4i64:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -792,12 +798,12 @@ define <4 x i64> @load_sext_4i8_to_4i64(
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i8_to_4i64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovsxbq (%rdi), %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i8_to_4i64:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movd (%eax), %xmm0
; X32-SSE41-NEXT: pmovzxbd %xmm0, %xmm1
@@ -824,6 +830,7 @@ define <4 x i64> @load_sext_4i8_to_4i64(
; X32-SSE41-NEXT: sarl $31, %eax
; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm1
; X32-SSE41-NEXT: retl
+entry:
%X = load <4 x i8>* %ptr
%Y = sext <4 x i8> %X to <4 x i64>
ret <4 x i64>%Y
@@ -831,7 +838,7 @@ define <4 x i64> @load_sext_4i8_to_4i64(
define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-LABEL: load_sext_4i16_to_4i64:
-; SSE2: ## BB#0:
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movq (%rdi), %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,0]
@@ -855,7 +862,7 @@ define <4 x i64> @load_sext_4i16_to_4i64
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i16_to_4i64:
-; SSSE3: ## BB#0:
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movq (%rdi), %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,1,0]
@@ -879,7 +886,7 @@ define <4 x i64> @load_sext_4i16_to_4i64
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i16_to_4i64:
-; SSE41: ## BB#0:
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: movq (%rdi), %xmm0
; SSE41-NEXT: pmovzxwd %xmm0, %xmm1
; SSE41-NEXT: pmovzxwq %xmm0, %xmm0
@@ -901,7 +908,7 @@ define <4 x i64> @load_sext_4i16_to_4i64
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i16_to_4i64:
-; AVX1: ## BB#0:
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -910,12 +917,12 @@ define <4 x i64> @load_sext_4i16_to_4i64
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i16_to_4i64:
-; AVX2: ## BB#0:
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovsxwq (%rdi), %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i16_to_4i64:
-; X32-SSE41: ## BB#0:
+; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movsd (%eax), %xmm0
; X32-SSE41-NEXT: pmovzxwd %xmm0, %xmm1
@@ -942,6 +949,7 @@ define <4 x i64> @load_sext_4i16_to_4i64
; X32-SSE41-NEXT: sarl $31, %eax
; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm1
; X32-SSE41-NEXT: retl
+entry:
%X = load <4 x i16>* %ptr
%Y = sext <4 x i16> %X to <4 x i64>
ret <4 x i64>%Y
Modified: llvm/trunk/test/CodeGen/X86/vector-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-zext.ll?rev=218854&r1=218853&r2=218854&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-zext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-zext.ll Thu Oct 2 01:52:19 2014
@@ -1,12 +1,12 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_8i16_to_8i32:
-; SSE2: ## BB#0: ## %entry
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
@@ -17,7 +17,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i16_to_8i32:
-; SSSE3: ## BB#0: ## %entry
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
@@ -28,7 +28,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i16_to_8i32:
-; SSE41: ## BB#0: ## %entry
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovzxwd %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE41-NEXT: pand %xmm1, %xmm2
@@ -38,7 +38,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i16_to_8i32:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
@@ -46,7 +46,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i32:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovzxwd %xmm0, %ymm0
; AVX2-NEXT: retq
entry:
@@ -56,7 +56,7 @@ entry:
define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: zext_4i32_to_4i64:
-; SSE2: ## BB#0: ## %entry
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE2-NEXT: pand %xmm3, %xmm2
@@ -66,7 +66,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_4i32_to_4i64:
-; SSSE3: ## BB#0: ## %entry
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,0]
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSSE3-NEXT: pand %xmm3, %xmm2
@@ -76,7 +76,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_4i32_to_4i64:
-; SSE41: ## BB#0: ## %entry
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovzxdq %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE41-NEXT: pand %xmm3, %xmm2
@@ -86,7 +86,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_4i32_to_4i64:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
@@ -94,7 +94,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_4i32_to_4i64:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovzxdq %xmm0, %ymm0
; AVX2-NEXT: retq
entry:
@@ -104,7 +104,7 @@ entry:
define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; SSE2-LABEL: zext_8i8_to_8i32:
-; SSE2: ## BB#0: ## %entry
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255]
@@ -115,7 +115,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_8i8_to_8i32:
-; SSSE3: ## BB#0: ## %entry
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255]
@@ -126,7 +126,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_8i8_to_8i32:
-; SSE41: ## BB#0: ## %entry
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovzxwd %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
@@ -136,17 +136,17 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_8i8_to_8i32:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpmovzxwd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vandps LCPI2_0(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vandps .{{.*}}, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i8_to_8i32:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovzxwd %xmm0, %ymm0
-; AVX2-NEXT: vpbroadcastd LCPI2_0(%rip), %ymm1
+; AVX2-NEXT: vpbroadcastd .{{.*}}, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
entry:
@@ -157,7 +157,7 @@ entry:
; PR17654
define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %z) {
; SSE2-LABEL: zext_16i8_to_16i16:
-; SSE2: ## BB#0: ## %entry
+; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
@@ -168,7 +168,7 @@ define <16 x i16> @zext_16i8_to_16i16(<1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: zext_16i8_to_16i16:
-; SSSE3: ## BB#0: ## %entry
+; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
@@ -179,7 +179,7 @@ define <16 x i16> @zext_16i8_to_16i16(<1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: zext_16i8_to_16i16:
-; SSE41: ## BB#0: ## %entry
+; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovzxbw %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
@@ -189,7 +189,7 @@ define <16 x i16> @zext_16i8_to_16i16(<1
; SSE41-NEXT: retq
;
; AVX1-LABEL: zext_16i8_to_16i16:
-; AVX1: ## BB#0: ## %entry
+; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -197,7 +197,7 @@ define <16 x i16> @zext_16i8_to_16i16(<1
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i16:
-; AVX2: ## BB#0: ## %entry
+; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovzxbw %xmm0, %ymm0
; AVX2-NEXT: retq
entry: