[llvm] r257264 - [X86][AVX] Add support for i64 broadcast loads on 32-bit targets
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Jan 9 11:59:28 PST 2016
Author: rksimon
Date: Sat Jan 9 13:59:27 2016
New Revision: 257264
URL: http://llvm.org/viewvc/llvm-project?rev=257264&view=rev
Log:
[X86][AVX] Add support for i64 broadcast loads on 32-bit targets
Added 32-bit AVX1/AVX2 broadcast tests.
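
For illustration, a minimal case adapted from the load_splat_4i64_2i64_1111 test updated below (the function name here is just an example): splatting an i64 element of a loaded vector on a 32-bit AVX2 target can now be selected as a single f64 broadcast load whose result is bitcast back to the integer type.

define <4 x i64> @splat_loaded_i64(<2 x i64>* %ptr) {
  ; With llc -mtriple=i686-apple-darwin -mattr=+avx2 this now selects
  ;   vbroadcastsd 8(%eax), %ymm0
  ; i.e. the i64 element is loaded and broadcast as an f64 (a legal
  ; scalar type on 32-bit targets) and the vector result bitcast back.
  %ld = load <2 x i64>, <2 x i64>* %ptr
  %ret = shufflevector <2 x i64> %ld, <2 x i64> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i64> %ret
}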
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll
llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=257264&r1=257263&r2=257264&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Jan 9 13:59:27 2016
@@ -8173,6 +8173,8 @@ static SDValue lowerVectorShuffleAsBroad
DL, VT, V.getOperand(0), BroadcastIdx, Subtarget, DAG))
return TruncBroadcast;
+ MVT BroadcastVT = VT;
+
// Also check the simpler case, where we can directly reuse the scalar.
if (V.getOpcode() == ISD::BUILD_VECTOR ||
(V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
@@ -8183,12 +8185,16 @@ static SDValue lowerVectorShuffleAsBroad
if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
return SDValue();
} else if (MayFoldLoad(V) && !cast<LoadSDNode>(V)->isVolatile()) {
+ // 32-bit targets need to load i64 as a f64 and then bitcast the result.
+ if (!Subtarget->is64Bit() && VT.getScalarType() == MVT::i64)
+ BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
+
// If we are broadcasting a load that is only used by the shuffle
// then we can reduce the vector load to the broadcasted scalar load.
LoadSDNode *Ld = cast<LoadSDNode>(V);
SDValue BaseAddr = Ld->getOperand(1);
EVT AddrVT = BaseAddr.getValueType();
- EVT SVT = VT.getScalarType();
+ EVT SVT = BroadcastVT.getScalarType();
unsigned Offset = BroadcastIdx * SVT.getStoreSize();
SDValue NewAddr = DAG.getNode(
ISD::ADD, DL, AddrVT, BaseAddr,
@@ -8202,7 +8208,8 @@ static SDValue lowerVectorShuffleAsBroad
return SDValue();
}
- return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
+ V = DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, V);
+ return DAG.getBitcast(VT, V);
}
// Check for whether we can use INSERTPS to perform the shuffle. We only use
Modified: llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll?rev=257264&r1=257263&r2=257264&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll Sat Jan 9 13:59:27 2016
@@ -1,11 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=X64
define <4 x i64> @A(i64* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: A:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: A:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl (%eax), %ecx
+; X32-NEXT: movl 4(%eax), %eax
+; X32-NEXT: vmovd %ecx, %xmm0
+; X32-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: A:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd (%rdi), %ymm0
+; X64-NEXT: retq
entry:
%q = load i64, i64* %ptr, align 8
%vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0
@@ -16,10 +29,16 @@ entry:
}
define <8 x i32> @B(i32* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: B:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: B:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: B:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %ymm0
+; X64-NEXT: retq
entry:
%q = load i32, i32* %ptr, align 4
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
@@ -30,10 +49,16 @@ entry:
}
define <4 x double> @C(double* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: C:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: C:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: C:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd (%rdi), %ymm0
+; X64-NEXT: retq
entry:
%q = load double, double* %ptr, align 8
%vecinit.i = insertelement <4 x double> undef, double %q, i32 0
@@ -44,10 +69,16 @@ entry:
}
define <8 x float> @D(float* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: D:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: D:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: D:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %ymm0
+; X64-NEXT: retq
entry:
%q = load float, float* %ptr, align 4
%vecinit.i = insertelement <8 x float> undef, float %q, i32 0
@@ -60,10 +91,16 @@ entry:
;;;; 128-bit versions
define <4 x float> @e(float* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: e:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: e:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: e:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%q = load float, float* %ptr, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
@@ -75,10 +112,15 @@ entry:
; Don't broadcast constants on pre-AVX2 hardware.
define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _e2:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovaps {{.*#+}} xmm0 = [-7.812500e-03,-7.812500e-03,-7.812500e-03,-7.812500e-03]
-; CHECK-NEXT: retq
+; X32-LABEL: _e2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: vmovaps {{.*#+}} xmm0 = [-7.812500e-03,-7.812500e-03,-7.812500e-03,-7.812500e-03]
+; X32-NEXT: retl
+;
+; X64-LABEL: _e2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovaps {{.*#+}} xmm0 = [-7.812500e-03,-7.812500e-03,-7.812500e-03,-7.812500e-03]
+; X64-NEXT: retq
entry:
%vecinit.i = insertelement <4 x float> undef, float 0xbf80000000000000, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float 0xbf80000000000000, i32 1
@@ -89,10 +131,16 @@ entry:
define <4 x i32> @F(i32* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: F:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: F:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: F:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%q = load i32, i32* %ptr, align 4
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
@@ -105,10 +153,16 @@ entry:
; FIXME: Pointer adjusted broadcasts
define <4 x i32> @load_splat_4i32_4i32_1111(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4i32_4i32_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,1,1]
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4i32_4i32_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,1,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4i32_4i32_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,1,1]
+; X64-NEXT: retq
entry:
%ld = load <4 x i32>, <4 x i32>* %ptr
%ret = shufflevector <4 x i32> %ld, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -116,11 +170,18 @@ entry:
}
define <8 x i32> @load_splat_8i32_4i32_33333333(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8i32_4i32_33333333:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,3,3,3]
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8i32_4i32_33333333:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,3,3,3]
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8i32_4i32_33333333:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,3,3,3]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
entry:
%ld = load <4 x i32>, <4 x i32>* %ptr
%ret = shufflevector <4 x i32> %ld, <4 x i32> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -128,13 +189,22 @@ entry:
}
define <8 x i32> @load_splat_8i32_8i32_55555555(<8 x i32>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8i32_8i32_55555555:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovaps (%rdi), %ymm0
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8i32_8i32_55555555:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovaps (%eax), %ymm0
+; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8i32_8i32_55555555:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
entry:
%ld = load <8 x i32>, <8 x i32>* %ptr
%ret = shufflevector <8 x i32> %ld, <8 x i32> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -142,10 +212,16 @@ entry:
}
define <4 x float> @load_splat_4f32_4f32_1111(<4 x float>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4f32_4f32_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 4(%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4f32_4f32_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 4(%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4f32_4f32_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 4(%rdi), %xmm0
+; X64-NEXT: retq
entry:
%ld = load <4 x float>, <4 x float>* %ptr
%ret = shufflevector <4 x float> %ld, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -153,10 +229,16 @@ entry:
}
define <8 x float> @load_splat_8f32_4f32_33333333(<4 x float>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8f32_4f32_33333333:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 12(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8f32_4f32_33333333:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 12(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8f32_4f32_33333333:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <4 x float>, <4 x float>* %ptr
%ret = shufflevector <4 x float> %ld, <4 x float> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -164,10 +246,16 @@ entry:
}
define <8 x float> @load_splat_8f32_8f32_55555555(<8 x float>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8f32_8f32_55555555:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 20(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8f32_8f32_55555555:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 20(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8f32_8f32_55555555:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <8 x float>, <8 x float>* %ptr
%ret = shufflevector <8 x float> %ld, <8 x float> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -175,10 +263,16 @@ entry:
}
define <2 x i64> @load_splat_2i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_2i64_2i64_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_2i64_2i64_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,3,2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_2i64_2i64_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = mem[2,3,2,3]
+; X64-NEXT: retq
entry:
%ld = load <2 x i64>, <2 x i64>* %ptr
%ret = shufflevector <2 x i64> %ld, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
@@ -186,12 +280,20 @@ entry:
}
define <4 x i64> @load_splat_4i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4i64_2i64_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovaps (%rdi), %xmm0
-; CHECK-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4i64_2i64_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4i64_2i64_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
entry:
%ld = load <2 x i64>, <2 x i64>* %ptr
%ret = shufflevector <2 x i64> %ld, <2 x i64> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -199,13 +301,22 @@ entry:
}
define <4 x i64> @load_splat_4i64_4i64_2222(<4 x i64>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4i64_4i64_2222:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovapd (%rdi), %ymm0
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4i64_4i64_2222:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovapd (%eax), %ymm0
+; X32-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4i64_4i64_2222:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovapd (%rdi), %ymm0
+; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
entry:
%ld = load <4 x i64>, <4 x i64>* %ptr
%ret = shufflevector <4 x i64> %ld, <4 x i64> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
@@ -213,11 +324,18 @@ entry:
}
define <2 x double> @load_splat_2f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_2f64_2f64_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovaps (%rdi), %xmm0
-; CHECK-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_2f64_2f64_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_2f64_2f64_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X64-NEXT: retq
entry:
%ld = load <2 x double>, <2 x double>* %ptr
%ret = shufflevector <2 x double> %ld, <2 x double> undef, <2 x i32> <i32 1, i32 1>
@@ -225,10 +343,16 @@ entry:
}
define <4 x double> @load_splat_4f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4f64_2f64_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd 8(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4f64_2f64_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4f64_2f64_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <2 x double>, <2 x double>* %ptr
%ret = shufflevector <2 x double> %ld, <2 x double> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -236,10 +360,16 @@ entry:
}
define <4 x double> @load_splat_4f64_4f64_2222(<4 x double>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4f64_4f64_2222:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd 16(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4f64_4f64_2222:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4f64_4f64_2222:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <4 x double>, <4 x double>* %ptr
%ret = shufflevector <4 x double> %ld, <4 x double> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
@@ -249,11 +379,22 @@ entry:
; Unsupported vbroadcasts
define <2 x i64> @G(i64* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: G:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; CHECK-NEXT: retq
+; X32-LABEL: G:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl (%eax), %ecx
+; X32-NEXT: movl 4(%eax), %eax
+; X32-NEXT: vmovd %ecx, %xmm0
+; X32-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: G:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; X64-NEXT: retq
entry:
%q = load i64, i64* %ptr, align 8
%vecinit.i = insertelement <2 x i64> undef, i64 %q, i32 0
@@ -262,20 +403,31 @@ entry:
}
define <4 x i32> @H(<4 x i32> %a) {
-; CHECK-LABEL: H:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; CHECK-NEXT: retq
+; X32-LABEL: H:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: H:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64-NEXT: retq
entry:
%x = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
ret <4 x i32> %x
}
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: I:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; CHECK-NEXT: retq
+; X32-LABEL: I:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: I:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT: retq
entry:
%q = load double, double* %ptr, align 4
%vecinit.i = insertelement <2 x double> undef, double %q, i32 0
@@ -284,12 +436,21 @@ entry:
}
define <4 x float> @_RR(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _RR:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
-; CHECK-NEXT: movl (%rsi), %eax
-; CHECK-NEXT: movl %eax, (%rax)
-; CHECK-NEXT: retq
+; X32-LABEL: _RR:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: vbroadcastss (%ecx), %xmm0
+; X32-NEXT: movl (%eax), %eax
+; X32-NEXT: movl %eax, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: _RR:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %xmm0
+; X64-NEXT: movl (%rsi), %eax
+; X64-NEXT: movl %eax, (%rax)
+; X64-NEXT: retq
entry:
%q = load float, float* %ptr, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
@@ -303,10 +464,16 @@ entry:
}
define <4 x float> @_RR2(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _RR2:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: _RR2:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _RR2:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%q = load float, float* %ptr, align 4
%v = insertelement <4 x float> undef, float %q, i32 0
@@ -319,10 +486,16 @@ entry:
; (via the insertelements).
define <8 x float> @splat_concat1(float* %p) {
-; CHECK-LABEL: splat_concat1:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: splat_concat1:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: splat_concat1:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss (%rdi), %ymm0
+; X64-NEXT: retq
%1 = load float, float* %p, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = insertelement <4 x float> %2, float %1, i32 1
@@ -333,10 +506,16 @@ define <8 x float> @splat_concat1(float*
}
define <8 x float> @splat_concat2(float* %p) {
-; CHECK-LABEL: splat_concat2:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: splat_concat2:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: splat_concat2:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss (%rdi), %ymm0
+; X64-NEXT: retq
%1 = load float, float* %p, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = insertelement <4 x float> %2, float %1, i32 1
@@ -351,10 +530,16 @@ define <8 x float> @splat_concat2(float*
}
define <4 x double> @splat_concat3(double* %p) {
-; CHECK-LABEL: splat_concat3:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: splat_concat3:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: splat_concat3:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastsd (%rdi), %ymm0
+; X64-NEXT: retq
%1 = load double, double* %p, align 8
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
@@ -363,10 +548,16 @@ define <4 x double> @splat_concat3(doubl
}
define <4 x double> @splat_concat4(double* %p) {
-; CHECK-LABEL: splat_concat4:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: splat_concat4:
+; X32: ## BB#0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: splat_concat4:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastsd (%rdi), %ymm0
+; X64-NEXT: retq
%1 = load double, double* %p, align 8
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll?rev=257264&r1=257263&r2=257264&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll Sat Jan 9 13:59:27 2016
@@ -1,11 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64
define <16 x i8> @BB16(i8* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: BB16:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: BB16:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastb (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: BB16:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastb (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%q = load i8, i8* %ptr, align 4
%q0 = insertelement <16 x i8> undef, i8 %q, i32 0
@@ -28,10 +35,16 @@ entry:
}
define <32 x i8> @BB32(i8* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: BB32:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: BB32:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastb (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: BB32:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastb (%rdi), %ymm0
+; X64-NEXT: retq
entry:
%q = load i8, i8* %ptr, align 4
%q0 = insertelement <32 x i8> undef, i8 %q, i32 0
@@ -71,10 +84,16 @@ entry:
}
define <8 x i16> @W16(i16* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: W16:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: W16:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastw (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: W16:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastw (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%q = load i16, i16* %ptr, align 4
%q0 = insertelement <8 x i16> undef, i16 %q, i32 0
@@ -89,10 +108,16 @@ entry:
}
define <16 x i16> @WW16(i16* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: WW16:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: WW16:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastw (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: WW16:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastw (%rdi), %ymm0
+; X64-NEXT: retq
entry:
%q = load i16, i16* %ptr, align 4
%q0 = insertelement <16 x i16> undef, i16 %q, i32 0
@@ -115,10 +140,16 @@ entry:
}
define <4 x i32> @D32(i32* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: D32:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss (%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: D32:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: D32:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%q = load i32, i32* %ptr, align 4
%q0 = insertelement <4 x i32> undef, i32 %q, i32 0
@@ -129,10 +160,16 @@ entry:
}
define <8 x i32> @DD32(i32* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: DD32:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: DD32:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss (%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: DD32:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss (%rdi), %ymm0
+; X64-NEXT: retq
entry:
%q = load i32, i32* %ptr, align 4
%q0 = insertelement <8 x i32> undef, i32 %q, i32 0
@@ -147,10 +184,21 @@ entry:
}
define <2 x i64> @Q64(i64* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: Q64:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: Q64:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl (%eax), %ecx
+; X32-NEXT: movl 4(%eax), %eax
+; X32-NEXT: vmovd %ecx, %xmm0
+; X32-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: Q64:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastq (%rdi), %xmm0
+; X64-NEXT: retq
entry:
%q = load i64, i64* %ptr, align 4
%q0 = insertelement <2 x i64> undef, i64 %q, i32 0
@@ -159,10 +207,22 @@ entry:
}
define <4 x i64> @QQ64(i64* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: QQ64:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: QQ64:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl (%eax), %ecx
+; X32-NEXT: movl 4(%eax), %eax
+; X32-NEXT: vmovd %ecx, %xmm0
+; X32-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
+; X32-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: QQ64:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd (%rdi), %ymm0
+; X64-NEXT: retq
entry:
%q = load i64, i64* %ptr, align 4
%q0 = insertelement <4 x i64> undef, i64 %q, i32 0
@@ -175,10 +235,16 @@ entry:
; FIXME: Pointer adjusted broadcasts
define <16 x i8> @load_splat_16i8_16i8_1111111111111111(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_16i8_16i8_1111111111111111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastb 1(%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_16i8_16i8_1111111111111111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastb 1(%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_16i8_16i8_1111111111111111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastb 1(%rdi), %xmm0
+; X64-NEXT: retq
entry:
%ld = load <16 x i8>, <16 x i8>* %ptr
%ret = shufflevector <16 x i8> %ld, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -186,10 +252,16 @@ entry:
}
define <32 x i8> @load_splat_32i8_16i8_11111111111111111111111111111111(<16 x i8>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_32i8_16i8_11111111111111111111111111111111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastb 1(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_32i8_16i8_11111111111111111111111111111111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastb 1(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_32i8_16i8_11111111111111111111111111111111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastb 1(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <16 x i8>, <16 x i8>* %ptr
%ret = shufflevector <16 x i8> %ld, <16 x i8> undef, <32 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -197,10 +269,16 @@ entry:
}
define <32 x i8> @load_splat_32i8_32i8_11111111111111111111111111111111(<32 x i8>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_32i8_32i8_11111111111111111111111111111111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastb 1(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_32i8_32i8_11111111111111111111111111111111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastb 1(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_32i8_32i8_11111111111111111111111111111111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastb 1(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <32 x i8>, <32 x i8>* %ptr
%ret = shufflevector <32 x i8> %ld, <32 x i8> undef, <32 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -208,10 +286,16 @@ entry:
}
define <8 x i16> @load_splat_8i16_8i16_11111111(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8i16_8i16_11111111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastw 2(%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8i16_8i16_11111111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastw 2(%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8i16_8i16_11111111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastw 2(%rdi), %xmm0
+; X64-NEXT: retq
entry:
%ld = load <8 x i16>, <8 x i16>* %ptr
%ret = shufflevector <8 x i16> %ld, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -219,10 +303,16 @@ entry:
}
define <16 x i16> @load_splat_16i16_8i16_1111111111111111(<8 x i16>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_16i16_8i16_1111111111111111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastw 2(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_16i16_8i16_1111111111111111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastw 2(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_16i16_8i16_1111111111111111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastw 2(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <8 x i16>, <8 x i16>* %ptr
%ret = shufflevector <8 x i16> %ld, <8 x i16> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -230,10 +320,16 @@ entry:
}
define <16 x i16> @load_splat_16i16_16i16_1111111111111111(<16 x i16>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_16i16_16i16_1111111111111111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastw 2(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_16i16_16i16_1111111111111111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vpbroadcastw 2(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_16i16_16i16_1111111111111111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastw 2(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <16 x i16>, <16 x i16>* %ptr
%ret = shufflevector <16 x i16> %ld, <16 x i16> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -241,10 +337,16 @@ entry:
}
define <4 x i32> @load_splat_4i32_4i32_1111(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4i32_4i32_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 4(%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4i32_4i32_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 4(%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4i32_4i32_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 4(%rdi), %xmm0
+; X64-NEXT: retq
entry:
%ld = load <4 x i32>, <4 x i32>* %ptr
%ret = shufflevector <4 x i32> %ld, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -252,10 +354,16 @@ entry:
}
define <8 x i32> @load_splat_8i32_4i32_33333333(<4 x i32>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8i32_4i32_33333333:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 12(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8i32_4i32_33333333:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 12(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8i32_4i32_33333333:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <4 x i32>, <4 x i32>* %ptr
%ret = shufflevector <4 x i32> %ld, <4 x i32> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -263,10 +371,16 @@ entry:
}
define <8 x i32> @load_splat_8i32_8i32_55555555(<8 x i32>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8i32_8i32_55555555:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 20(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8i32_8i32_55555555:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 20(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8i32_8i32_55555555:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <8 x i32>, <8 x i32>* %ptr
%ret = shufflevector <8 x i32> %ld, <8 x i32> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -274,10 +388,16 @@ entry:
}
define <4 x float> @load_splat_4f32_4f32_1111(<4 x float>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4f32_4f32_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 4(%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4f32_4f32_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 4(%eax), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4f32_4f32_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 4(%rdi), %xmm0
+; X64-NEXT: retq
entry:
%ld = load <4 x float>, <4 x float>* %ptr
%ret = shufflevector <4 x float> %ld, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -285,10 +405,16 @@ entry:
}
define <8 x float> @load_splat_8f32_4f32_33333333(<4 x float>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8f32_4f32_33333333:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 12(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8f32_4f32_33333333:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 12(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8f32_4f32_33333333:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 12(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <4 x float>, <4 x float>* %ptr
%ret = shufflevector <4 x float> %ld, <4 x float> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -296,10 +422,16 @@ entry:
}
define <8 x float> @load_splat_8f32_8f32_55555555(<8 x float>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_8f32_8f32_55555555:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss 20(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_8f32_8f32_55555555:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastss 20(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_8f32_8f32_55555555:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss 20(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <8 x float>, <8 x float>* %ptr
%ret = shufflevector <8 x float> %ld, <8 x float> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
@@ -307,10 +439,17 @@ entry:
}
define <2 x i64> @load_splat_2i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_2i64_2i64_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastq 8(%rdi), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_2i64_2i64_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_2i64_2i64_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastq 8(%rdi), %xmm0
+; X64-NEXT: retq
entry:
%ld = load <2 x i64>, <2 x i64>* %ptr
%ret = shufflevector <2 x i64> %ld, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
@@ -318,10 +457,16 @@ entry:
}
define <4 x i64> @load_splat_4i64_2i64_1111(<2 x i64>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4i64_2i64_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd 8(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4i64_2i64_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4i64_2i64_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <2 x i64>, <2 x i64>* %ptr
%ret = shufflevector <2 x i64> %ld, <2 x i64> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -329,10 +474,16 @@ entry:
}
define <4 x i64> @load_splat_4i64_4i64_2222(<4 x i64>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4i64_4i64_2222:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd 16(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4i64_4i64_2222:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4i64_4i64_2222:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <4 x i64>, <4 x i64>* %ptr
%ret = shufflevector <4 x i64> %ld, <4 x i64> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
@@ -340,11 +491,18 @@ entry:
}
define <2 x double> @load_splat_2f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_2f64_2f64_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovaps (%rdi), %xmm0
-; CHECK-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_2f64_2f64_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovaps (%eax), %xmm0
+; X32-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_2f64_2f64_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovaps (%rdi), %xmm0
+; X64-NEXT: vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X64-NEXT: retq
entry:
%ld = load <2 x double>, <2 x double>* %ptr
%ret = shufflevector <2 x double> %ld, <2 x double> undef, <2 x i32> <i32 1, i32 1>
@@ -352,10 +510,16 @@ entry:
}
define <4 x double> @load_splat_4f64_2f64_1111(<2 x double>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4f64_2f64_1111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd 8(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4f64_2f64_1111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd 8(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4f64_2f64_1111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd 8(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <2 x double>, <2 x double>* %ptr
%ret = shufflevector <2 x double> %ld, <2 x double> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -363,10 +527,16 @@ entry:
}
define <4 x double> @load_splat_4f64_4f64_2222(<4 x double>* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: load_splat_4f64_4f64_2222:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastsd 16(%rdi), %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: load_splat_4f64_4f64_2222:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vbroadcastsd 16(%eax), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: load_splat_4f64_4f64_2222:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastsd 16(%rdi), %ymm0
+; X64-NEXT: retq
entry:
%ld = load <4 x double>, <4 x double>* %ptr
%ret = shufflevector <4 x double> %ld, <4 x double> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
@@ -376,10 +546,16 @@ entry:
; make sure that we still don't support broadcast double into 128-bit vector
; this used to crash
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: I:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
-; CHECK-NEXT: retq
+; X32-LABEL: I:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: I:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-NEXT: retq
entry:
%q = load double, double* %ptr, align 4
%vecinit.i = insertelement <2 x double> undef, double %q, i32 0
@@ -388,32 +564,49 @@ entry:
}
define <8 x i32> @V111(<8 x i32> %in) nounwind uwtable readnone ssp {
-; CHECK-LABEL: V111:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: V111:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: vpbroadcastd LCPI27_0, %ymm1
+; X32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: V111:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
entry:
%g = add <8 x i32> %in, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
ret <8 x i32> %g
}
define <8 x float> @V113(<8 x float> %in) nounwind uwtable readnone ssp {
-; CHECK-LABEL: V113:
-; CHECK: ## BB#0: ## %entry
-; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
-; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: V113:
+; X32: ## BB#0: ## %entry
+; X32-NEXT: vbroadcastss LCPI28_0, %ymm1
+; X32-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: V113:
+; X64: ## BB#0: ## %entry
+; X64-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
+; X64-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; X64-NEXT: retq
entry:
%g = fadd <8 x float> %in, <float 0xbf80000000000000, float 0xbf80000000000000, float 0xbf80000000000000, float 0xbf80000000000000, float 0xbf80000000000000, float 0xbf80000000000000, float 0xbf80000000000000, float 0xbf80000000000000>
ret <8 x float> %g
}
define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _e2:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: _e2:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss LCPI29_0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _e2:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%vecinit.i = insertelement <4 x float> undef, float 0xbf80000000000000, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float 0xbf80000000000000, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float 0xbf80000000000000, i32 2
@@ -422,10 +615,15 @@ define <4 x float> @_e2(float* %ptr) nou
}
define <8 x i8> @_e4(i8* %ptr) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _e4:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmovaps {{.*#+}} xmm0 = [52,52,52,52,52,52,52,52]
-; CHECK-NEXT: retq
+; X32-LABEL: _e4:
+; X32: ## BB#0:
+; X32-NEXT: vmovaps {{.*#+}} xmm0 = [52,52,52,52,52,52,52,52]
+; X32-NEXT: retl
+;
+; X64-LABEL: _e4:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps {{.*#+}} xmm0 = [52,52,52,52,52,52,52,52]
+; X64-NEXT: retq
%vecinit0.i = insertelement <8 x i8> undef, i8 52, i32 0
%vecinit1.i = insertelement <8 x i8> %vecinit0.i, i8 52, i32 1
%vecinit2.i = insertelement <8 x i8> %vecinit1.i, i8 52, i32 2
@@ -437,19 +635,30 @@ define <8 x i8> @_e4(i8* %ptr) nounwind
ret <8 x i8> %vecinit7.i
}
-
define void @crash() nounwind alwaysinline {
-; CHECK-LABEL: crash:
-; CHECK: ## BB#0: ## %WGLoopsEntry
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: je LBB31_1
-; CHECK-NEXT: ## BB#2: ## %ret
-; CHECK-NEXT: retq
-; CHECK-NEXT: .align 4, 0x90
-; CHECK-NEXT: LBB31_1: ## %footer349VF
-; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: jmp LBB31_1
+; X32-LABEL: crash:
+; X32: ## BB#0: ## %WGLoopsEntry
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: testb %al, %al
+; X32-NEXT: je LBB31_1
+; X32-NEXT: ## BB#2: ## %ret
+; X32-NEXT: retl
+; X32-NEXT: .align 4, 0x90
+; X32-NEXT: LBB31_1: ## %footer349VF
+; X32-NEXT: ## =>This Inner Loop Header: Depth=1
+; X32-NEXT: jmp LBB31_1
+;
+; X64-LABEL: crash:
+; X64: ## BB#0: ## %WGLoopsEntry
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: testb %al, %al
+; X64-NEXT: je LBB31_1
+; X64-NEXT: ## BB#2: ## %ret
+; X64-NEXT: retq
+; X64-NEXT: .align 4, 0x90
+; X64-NEXT: LBB31_1: ## %footer349VF
+; X64-NEXT: ## =>This Inner Loop Header: Depth=1
+; X64-NEXT: jmp LBB31_1
WGLoopsEntry:
br i1 undef, label %ret, label %footer329VF
@@ -477,150 +686,230 @@ ret:
}
define <8 x i32> @_inreg0(i32 %scalar) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _inreg0:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd %edi, %xmm0
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg0:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg0:
+; X64: ## BB#0:
+; X64-NEXT: vmovd %edi, %xmm0
+; X64-NEXT: vbroadcastss %xmm0, %ymm0
+; X64-NEXT: retq
%in = insertelement <8 x i32> undef, i32 %scalar, i32 0
%wide = shufflevector <8 x i32> %in, <8 x i32> undef, <8 x i32> zeroinitializer
ret <8 x i32> %wide
}
define <8 x float> @_inreg1(float %scalar) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _inreg1:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg1:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg1:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %ymm0
+; X64-NEXT: retq
%in = insertelement <8 x float> undef, float %scalar, i32 0
%wide = shufflevector <8 x float> %in, <8 x float> undef, <8 x i32> zeroinitializer
ret <8 x float> %wide
}
define <4 x float> @_inreg2(float %scalar) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _inreg2:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg2:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg2:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %xmm0
+; X64-NEXT: retq
%in = insertelement <4 x float> undef, float %scalar, i32 0
%wide = shufflevector <4 x float> %in, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %wide
}
define <4 x double> @_inreg3(double %scalar) nounwind uwtable readnone ssp {
-; CHECK-LABEL: _inreg3:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg3:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg3:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastsd %xmm0, %ymm0
+; X64-NEXT: retq
%in = insertelement <4 x double> undef, double %scalar, i32 0
%wide = shufflevector <4 x double> %in, <4 x double> undef, <4 x i32> zeroinitializer
ret <4 x double> %wide
}
define <8 x float> @_inreg8xfloat(<8 x float> %a) {
-; CHECK-LABEL: _inreg8xfloat:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg8xfloat:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg8xfloat:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %ymm0
+; X64-NEXT: retq
%b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
ret <8 x float> %b
}
define <4 x float> @_inreg4xfloat(<4 x float> %a) {
-; CHECK-LABEL: _inreg4xfloat:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg4xfloat:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg4xfloat:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %xmm0
+; X64-NEXT: retq
%b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %b
}
define <16 x i16> @_inreg16xi16(<16 x i16> %a) {
-; CHECK-LABEL: _inreg16xi16:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg16xi16:
+; X32: ## BB#0:
+; X32-NEXT: vpbroadcastw %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg16xi16:
+; X64: ## BB#0:
+; X64-NEXT: vpbroadcastw %xmm0, %ymm0
+; X64-NEXT: retq
%b = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
ret <16 x i16> %b
}
define <8 x i16> @_inreg8xi16(<8 x i16> %a) {
-; CHECK-LABEL: _inreg8xi16:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg8xi16:
+; X32: ## BB#0:
+; X32-NEXT: vpbroadcastw %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg8xi16:
+; X64: ## BB#0:
+; X64-NEXT: vpbroadcastw %xmm0, %xmm0
+; X64-NEXT: retq
%b = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> zeroinitializer
ret <8 x i16> %b
}
define <4 x i64> @_inreg4xi64(<4 x i64> %a) {
-; CHECK-LABEL: _inreg4xi64:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg4xi64:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastsd %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg4xi64:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastsd %xmm0, %ymm0
+; X64-NEXT: retq
%b = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> zeroinitializer
ret <4 x i64> %b
}
define <2 x i64> @_inreg2xi64(<2 x i64> %a) {
-; CHECK-LABEL: _inreg2xi64:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg2xi64:
+; X32: ## BB#0:
+; X32-NEXT: vpbroadcastq %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg2xi64:
+; X64: ## BB#0:
+; X64-NEXT: vpbroadcastq %xmm0, %xmm0
+; X64-NEXT: retq
%b = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %b
}
define <4 x double> @_inreg4xdouble(<4 x double> %a) {
-; CHECK-LABEL: _inreg4xdouble:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg4xdouble:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastsd %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg4xdouble:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastsd %xmm0, %ymm0
+; X64-NEXT: retq
%b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer
ret <4 x double> %b
}
define <2 x double> @_inreg2xdouble(<2 x double> %a) {
-; CHECK-LABEL: _inreg2xdouble:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg2xdouble:
+; X32: ## BB#0:
+; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg2xdouble:
+; X64: ## BB#0:
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64-NEXT: retq
%b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
ret <2 x double> %b
}
define <8 x i32> @_inreg8xi32(<8 x i32> %a) {
-; CHECK-LABEL: _inreg8xi32:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg8xi32:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg8xi32:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %ymm0
+; X64-NEXT: retq
%b = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
ret <8 x i32> %b
}
define <4 x i32> @_inreg4xi32(<4 x i32> %a) {
-; CHECK-LABEL: _inreg4xi32:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg4xi32:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg4xi32:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %xmm0
+; X64-NEXT: retq
%b = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %b
}
define <32 x i8> @_inreg32xi8(<32 x i8> %a) {
-; CHECK-LABEL: _inreg32xi8:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg32xi8:
+; X32: ## BB#0:
+; X32-NEXT: vpbroadcastb %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg32xi8:
+; X64: ## BB#0:
+; X64-NEXT: vpbroadcastb %xmm0, %ymm0
+; X64-NEXT: retq
%b = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> zeroinitializer
ret <32 x i8> %b
}
define <16 x i8> @_inreg16xi8(<16 x i8> %a) {
-; CHECK-LABEL: _inreg16xi8:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X32-LABEL: _inreg16xi8:
+; X32: ## BB#0:
+; X32-NEXT: vpbroadcastb %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: _inreg16xi8:
+; X64: ## BB#0:
+; X64-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-NEXT: retq
%b = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %b
}
@@ -630,10 +919,15 @@ define <16 x i8> @_inreg16xi8(<16 x i8
; (via the insertelements).
define <8 x float> @splat_concat1(float %f) {
-; CHECK-LABEL: splat_concat1:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: splat_concat1:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: splat_concat1:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %ymm0
+; X64-NEXT: retq
%1 = insertelement <4 x float> undef, float %f, i32 0
%2 = insertelement <4 x float> %1, float %f, i32 1
%3 = insertelement <4 x float> %2, float %f, i32 2
@@ -643,10 +937,15 @@ define <8 x float> @splat_concat1(float
}
define <8 x float> @splat_concat2(float %f) {
-; CHECK-LABEL: splat_concat2:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: splat_concat2:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: splat_concat2:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastss %xmm0, %ymm0
+; X64-NEXT: retq
%1 = insertelement <4 x float> undef, float %f, i32 0
%2 = insertelement <4 x float> %1, float %f, i32 1
%3 = insertelement <4 x float> %2, float %f, i32 2
@@ -660,10 +959,15 @@ define <8 x float> @splat_concat2(float
}
define <4 x double> @splat_concat3(double %d) {
-; CHECK-LABEL: splat_concat3:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: splat_concat3:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: splat_concat3:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastsd %xmm0, %ymm0
+; X64-NEXT: retq
%1 = insertelement <2 x double> undef, double %d, i32 0
%2 = insertelement <2 x double> %1, double %d, i32 1
%3 = shufflevector <2 x double> %2, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -671,10 +975,15 @@ define <4 x double> @splat_concat3(doubl
}
define <4 x double> @splat_concat4(double %d) {
-; CHECK-LABEL: splat_concat4:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT: retq
+; X32-LABEL: splat_concat4:
+; X32: ## BB#0:
+; X32-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: splat_concat4:
+; X64: ## BB#0:
+; X64-NEXT: vbroadcastsd %xmm0, %ymm0
+; X64-NEXT: retq
%1 = insertelement <2 x double> undef, double %d, i32 0
%2 = insertelement <2 x double> %1, double %d, i32 1
%3 = insertelement <2 x double> undef, double %d, i32 0
@@ -805,9 +1114,9 @@ eintry:
ret void
}
-; CHECK-LABEL: isel_crash_2q
-; CHECK: vpbroadcastq {{[^,]+}}, %xmm{{[0-9]+}}
-; CHECK: ret
+; X64-LABEL: isel_crash_2q
+; X64: vpbroadcastq {{[^,]+}}, %xmm{{[0-9]+}}
+; X64: ret
define void @isel_crash_2q(i64* %cV_R.addr) {
entry:
%__a.addr.i = alloca <2 x i64>, align 16
@@ -823,9 +1132,9 @@ entry:
ret void
}
-; CHECK-LABEL: isel_crash_4q
-; CHECK: vbroadcastsd {{[^,]+}}, %ymm{{[0-9]+}}
-; CHECK: ret
+; X64-LABEL: isel_crash_4q
+; X64: vbroadcastsd {{[^,]+}}, %ymm{{[0-9]+}}
+; X64: ret
define void @isel_crash_4q(i64* %cV_R.addr) {
eintry:
%__a.addr.i = alloca <4 x i64>, align 16