[llvm] r276416 - [X86][AVX] Added support for lowering to VBROADCASTF128/VBROADCASTI128 (reapplied)
Igor Laevsky via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 29 08:59:10 PDT 2016
Hi Simon,
It looks like one of our internal tests started to fail after this change. The exposed issue is described at https://llvm.org/bugs/show_bug.cgi?id=28770
Could you take a look at this please?
— Igor
On 22 Jul 2016, at 16:58, Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org> wrote:
> Author: rksimon
> Date: Fri Jul 22 08:58:44 2016
> New Revision: 276416
>
> URL: http://llvm.org/viewvc/llvm-project?rev=276416&view=rev
> Log:
> [X86][AVX] Added support for lowering to VBROADCASTF128/VBROADCASTI128 (reapplied)
>
> As reported on PR26235, we don't currently make use of the VBROADCASTF128/VBROADCASTI128 instructions (or the AVX512 equivalents) to load+splat a 128-bit vector to both lanes of a 256-bit vector.
>
> This patch enables lowering from subvector insertion/concatenation patterns and auto-upgrades the llvm.x86.avx.vbroadcastf128.pd.256 / llvm.x86.avx.vbroadcastf128.ps.256 intrinsics to match.
>
> We could possibly investigate using VBROADCASTF128/VBROADCASTI128 to load repeated constants as well (similar to how we already do for scalar broadcasts).
>
> Reapplied with fix for PR28657 - removed intrinsic definitions (clang companion patch to be submitted shortly).
>
> Differential Revision: https://reviews.llvm.org/D22460
>
> Modified:
> llvm/trunk/include/llvm/IR/IntrinsicsX86.td
> llvm/trunk/lib/IR/AutoUpgrade.cpp
> llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> llvm/trunk/lib/Target/X86/X86InstrAVX512.td
> llvm/trunk/lib/Target/X86/X86InstrSSE.td
> llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
> llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
> llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
> llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll
> llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
> llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll
> llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll
> llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
>
> Modified: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsX86.td?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/include/llvm/IR/IntrinsicsX86.td (original)
> +++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td Fri Jul 22 08:58:44 2016
> @@ -1694,16 +1694,6 @@ let TargetPrefix = "x86" in { // All in
> Intrinsic<[], [], []>;
> }
>
> -// Vector load with broadcast
> -let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
> - def int_x86_avx_vbroadcastf128_pd_256 :
> - GCCBuiltin<"__builtin_ia32_vbroadcastf128_pd256">,
> - Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
> - def int_x86_avx_vbroadcastf128_ps_256 :
> - GCCBuiltin<"__builtin_ia32_vbroadcastf128_ps256">,
> - Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>;
> -}
> -
> // SIMD load ops
> let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
> def int_x86_avx_ldu_dq_256 : GCCBuiltin<"__builtin_ia32_lddqu256">,
>
> Modified: llvm/trunk/lib/IR/AutoUpgrade.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/AutoUpgrade.cpp?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/lib/IR/AutoUpgrade.cpp (original)
> +++ llvm/trunk/lib/IR/AutoUpgrade.cpp Fri Jul 22 08:58:44 2016
> @@ -296,6 +296,7 @@ static bool UpgradeIntrinsicFunction1(Fu
> Name.startswith("avx.blend.p") ||
> Name == "avx2.pblendw" ||
> Name.startswith("avx2.pblendd.") ||
> + Name.startswith("avx.vbroadcastf128") ||
> Name == "avx2.vbroadcasti128" ||
> Name == "xop.vpcmov" ||
> (Name.startswith("xop.vpcom") && F->arg_size() == 2))) {
> @@ -886,7 +887,7 @@ void llvm::UpgradeIntrinsicCall(CallInst
> Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
> Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
> Rep = Builder.CreateZExt(Rep, CI->getType(), "");
> - } else if (IsX86 && Name.startswith("avx.vbroadcast")) {
> + } else if (IsX86 && Name.startswith("avx.vbroadcast.s")) {
> // Replace broadcasts with a series of insertelements.
> Type *VecTy = CI->getType();
> Type *EltTy = VecTy->getVectorElementType();
> @@ -918,15 +919,21 @@ void llvm::UpgradeIntrinsicCall(CallInst
> bool DoSext = (StringRef::npos != Name.find("pmovsx"));
> Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
> : Builder.CreateZExt(SV, DstTy);
> - } else if (IsX86 && Name == "avx2.vbroadcasti128") {
> - // Replace vbroadcasts with a vector shuffle.
> - Type *VT = VectorType::get(Type::getInt64Ty(C), 2);
> + } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
> + Name == "avx2.vbroadcasti128")) {
> + // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
> + Type *EltTy = CI->getType()->getVectorElementType();
> + unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
> + Type *VT = VectorType::get(EltTy, NumSrcElts);
> Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
> PointerType::getUnqual(VT));
> Value *Load = Builder.CreateLoad(VT, Op);
> - uint32_t Idxs[4] = { 0, 1, 0, 1 };
> - Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
> - Idxs);
> + if (NumSrcElts == 2)
> + Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
> + { 0, 1, 0, 1 });
> + else
> + Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
> + { 0, 1, 2, 3, 0, 1, 2, 3 });
> } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
> Name.startswith("avx2.vbroadcast") ||
> Name.startswith("avx512.pbroadcast") ||
>
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Jul 22 08:58:44 2016
> @@ -12804,6 +12804,10 @@ static SDValue LowerINSERT_SUBVECTOR(SDV
> // (insert_subvector (insert_subvector undef, (load addr), 0),
> // (load addr + 16), Elts/2)
> // --> load32 addr
> + // or a 16-byte broadcast:
> + // (insert_subvector (insert_subvector undef, (load addr), 0),
> + // (load addr), Elts/2)
> + // --> X86SubVBroadcast(load16 addr)
> if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
> Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
> OpVT.is256BitVector() && SubVecVT.is128BitVector()) {
> @@ -12822,6 +12826,10 @@ static SDValue LowerINSERT_SUBVECTOR(SDV
> if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false))
> return Ld;
> }
> +
> + // If lower/upper loads are the same then lower to a VBROADCASTF128.
> + if (SubVec2 == peekThroughBitcasts(SubVec))
> + return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT, SubVec);
> }
> }
> }
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Fri Jul 22 08:58:44 2016
> @@ -986,6 +986,10 @@ multiclass avx512_subvec_broadcast_rm<bi
> AVX5128IBase, EVEX;
> }
>
> +//===----------------------------------------------------------------------===//
> +// AVX-512 BROADCAST SUBVECTORS
> +//
> +
> defm VBROADCASTI32X4 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
> v16i32_info, v4i32x_info>,
> EVEX_V512, EVEX_CD8<32, CD8VT4>;
> @@ -1006,7 +1010,13 @@ defm VBROADCASTI32X4Z256 : avx512_subvec
> defm VBROADCASTF32X4Z256 : avx512_subvec_broadcast_rm<0x1a, "vbroadcastf32x4",
> v8f32x_info, v4f32x_info>,
> EVEX_V256, EVEX_CD8<32, CD8VT4>;
> +
> +def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
> + (VBROADCASTI32X4Z256rm addr:$src)>;
> +def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
> + (VBROADCASTI32X4Z256rm addr:$src)>;
> }
> +
> let Predicates = [HasVLX, HasDQI] in {
> defm VBROADCASTI64X2Z128 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti64x2",
> v4i64x_info, v2i64x_info>, VEX_W,
> @@ -1015,6 +1025,14 @@ defm VBROADCASTF64X2Z128 : avx512_subvec
> v4f64x_info, v2f64x_info>, VEX_W,
> EVEX_V256, EVEX_CD8<64, CD8VT2>;
> }
> +
> +let Predicates = [HasVLX, NoDQI] in {
> +def : Pat<(v4f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
> + (VBROADCASTF32X4Z256rm addr:$src)>;
> +def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
> + (VBROADCASTI32X4Z256rm addr:$src)>;
> +}
> +
> let Predicates = [HasDQI] in {
> defm VBROADCASTI64X2 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti64x2",
> v8i64_info, v2i64x_info>, VEX_W,
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Fri Jul 22 08:58:44 2016
> @@ -7759,23 +7759,50 @@ let ExeDomain = SSEPackedDouble, Predica
> def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
> v4f64, v2f64, WriteFShuffle256>, VEX_L;
>
> +//===----------------------------------------------------------------------===//
> +// VBROADCAST*128 - Load from memory and broadcast 128-bit vector to both
> +// halves of a 256-bit vector.
> +//
> let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX2] in
> def VBROADCASTI128 : AVX8I<0x5A, MRMSrcMem, (outs VR256:$dst),
> (ins i128mem:$src),
> "vbroadcasti128\t{$src, $dst|$dst, $src}", []>,
> Sched<[WriteLoad]>, VEX, VEX_L;
>
> +let mayLoad = 1, hasSideEffects = 0, Predicates = [HasAVX] in
> def VBROADCASTF128 : AVX8I<0x1A, MRMSrcMem, (outs VR256:$dst),
> (ins f128mem:$src),
> - "vbroadcastf128\t{$src, $dst|$dst, $src}",
> - [(set VR256:$dst,
> - (int_x86_avx_vbroadcastf128_pd_256 addr:$src))]>,
> + "vbroadcastf128\t{$src, $dst|$dst, $src}", []>,
> Sched<[WriteFShuffleLd]>, VEX, VEX_L;
>
> -let Predicates = [HasAVX] in
> -def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
> +let Predicates = [HasAVX2, NoVLX] in {
> +def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
> + (VBROADCASTI128 addr:$src)>;
> +def : Pat<(v8i32 (X86SubVBroadcast (bc_v4i32 (loadv2i64 addr:$src)))),
> + (VBROADCASTI128 addr:$src)>;
> +def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
> + (VBROADCASTI128 addr:$src)>;
> +def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
> + (VBROADCASTI128 addr:$src)>;
> +}
> +
> +let Predicates = [HasAVX, NoVLX] in {
> +def : Pat<(v4f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
> (VBROADCASTF128 addr:$src)>;
> +def : Pat<(v8f32 (X86SubVBroadcast (loadv4f32 addr:$src))),
> + (VBROADCASTF128 addr:$src)>;
> +}
>
> +let Predicates = [HasAVX1Only] in {
> +def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
> + (VBROADCASTF128 addr:$src)>;
> +def : Pat<(v8i32 (X86SubVBroadcast (bc_v4i32 (loadv2i64 addr:$src)))),
> + (VBROADCASTF128 addr:$src)>;
> +def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
> + (VBROADCASTF128 addr:$src)>;
> +def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
> + (VBROADCASTF128 addr:$src)>;
> +}
>
> //===----------------------------------------------------------------------===//
> // VINSERTF128 - Insert packed floating-point values
>
> Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll Fri Jul 22 08:58:44 2016
> @@ -207,11 +207,10 @@ define <4 x double> @test_mm256_broadcas
> ; X64: # BB#0:
> ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> - %arg0 = bitcast <2 x double>* %a0 to i8*
> - %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %arg0)
> + %ld = load <2 x double>, <2 x double>* %a0
> + %res = shufflevector <2 x double> %ld, <2 x double> %ld, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
> ret <4 x double> %res
> }
> -declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
>
> define <8 x float> @test_mm256_broadcast_ps(<4 x float>* %a0) nounwind {
> ; X32-LABEL: test_mm256_broadcast_ps:
> @@ -224,11 +223,10 @@ define <8 x float> @test_mm256_broadcast
> ; X64: # BB#0:
> ; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> - %arg0 = bitcast <4 x float>* %a0 to i8*
> - %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %arg0)
> + %ld = load <4 x float>, <4 x float>* %a0
> + %res = shufflevector <4 x float> %ld, <4 x float> %ld, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
> ret <8 x float> %res
> }
> -declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
>
> define <4 x double> @test_mm256_broadcast_sd(double* %a0) nounwind {
> ; X32-LABEL: test_mm256_broadcast_sd:
>
> Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll Fri Jul 22 08:58:44 2016
> @@ -95,6 +95,30 @@ define <2 x double> @test_x86_avx_extrac
> }
>
>
> +define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
> +; CHECK-LABEL: test_x86_avx_vbroadcastf128_pd_256:
> +; CHECK: ## BB#0:
> +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> +; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> +; CHECK-NEXT: retl
> + %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
> + ret <4 x double> %res
> +}
> +declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
> +
> +
> +define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
> +; CHECK-LABEL: test_x86_avx_vbroadcastf128_ps_256:
> +; CHECK: ## BB#0:
> +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> +; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> +; CHECK-NEXT: retl
> + %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
> + ret <8 x float> %res
> +}
> +declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
> +
> +
> define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
> ; CHECK-LABEL: test_x86_avx_blend_pd_256:
> ; CHECK: ## BB#0:
> @@ -364,7 +388,7 @@ define void @test_x86_sse2_storeu_dq(i8*
> ; CHECK-LABEL: test_x86_sse2_storeu_dq:
> ; CHECK: ## BB#0:
> ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; CHECK-NEXT: vpaddb LCPI32_0, %xmm0, %xmm0
> +; CHECK-NEXT: vpaddb LCPI34_0, %xmm0, %xmm0
> ; CHECK-NEXT: vmovdqu %xmm0, (%eax)
> ; CHECK-NEXT: retl
> %a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
>
> Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll Fri Jul 22 08:58:44 2016
> @@ -3970,42 +3970,6 @@ define <8 x float> @test_x86_avx_sqrt_ps
> declare <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float>) nounwind readnone
>
>
> -define <4 x double> @test_x86_avx_vbroadcastf128_pd_256(i8* %a0) {
> -; AVX-LABEL: test_x86_avx_vbroadcastf128_pd_256:
> -; AVX: ## BB#0:
> -; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> -; AVX-NEXT: retl
> -;
> -; AVX512VL-LABEL: test_x86_avx_vbroadcastf128_pd_256:
> -; AVX512VL: ## BB#0:
> -; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> -; AVX512VL-NEXT: retl
> - %res = call <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8* %a0) ; <<4 x double>> [#uses=1]
> - ret <4 x double> %res
> -}
> -declare <4 x double> @llvm.x86.avx.vbroadcastf128.pd.256(i8*) nounwind readonly
> -
> -
> -define <8 x float> @test_x86_avx_vbroadcastf128_ps_256(i8* %a0) {
> -; AVX-LABEL: test_x86_avx_vbroadcastf128_ps_256:
> -; AVX: ## BB#0:
> -; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> -; AVX-NEXT: retl
> -;
> -; AVX512VL-LABEL: test_x86_avx_vbroadcastf128_ps_256:
> -; AVX512VL: ## BB#0:
> -; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; AVX512VL-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> -; AVX512VL-NEXT: retl
> - %res = call <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8* %a0) ; <<8 x float>> [#uses=1]
> - ret <8 x float> %res
> -}
> -declare <8 x float> @llvm.x86.avx.vbroadcastf128.ps.256(i8*) nounwind readonly
> -
> -
> define <4 x double> @test_x86_avx_vperm2f128_pd_256(<4 x double> %a0, <4 x double> %a1) {
> ; AVX-LABEL: test_x86_avx_vperm2f128_pd_256:
> ; AVX: ## BB#0:
> @@ -4585,7 +4549,7 @@ define void @movnt_dq(i8* %p, <2 x i64>
> ; AVX-LABEL: movnt_dq:
> ; AVX: ## BB#0:
> ; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; AVX-NEXT: vpaddq LCPI256_0, %xmm0, %xmm0
> +; AVX-NEXT: vpaddq LCPI254_0, %xmm0, %xmm0
> ; AVX-NEXT: vmovntdq %ymm0, (%eax)
> ; AVX-NEXT: vzeroupper
> ; AVX-NEXT: retl
> @@ -4593,7 +4557,7 @@ define void @movnt_dq(i8* %p, <2 x i64>
> ; AVX512VL-LABEL: movnt_dq:
> ; AVX512VL: ## BB#0:
> ; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; AVX512VL-NEXT: vpaddq LCPI256_0, %xmm0, %xmm0
> +; AVX512VL-NEXT: vpaddq LCPI254_0, %xmm0, %xmm0
> ; AVX512VL-NEXT: vmovntdq %ymm0, (%eax)
> ; AVX512VL-NEXT: retl
> %a2 = add <2 x i64> %a1, <i64 1, i64 1>
>
> Modified: llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx-vbroadcastf128.ll Fri Jul 22 08:58:44 2016
> @@ -6,14 +6,12 @@ define <4 x double> @test_broadcast_2f64
> ; X32-LABEL: test_broadcast_2f64_4f64:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovaps (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_2f64_4f64:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovaps (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> %1 = load <2 x double>, <2 x double> *%p
> %2 = shufflevector <2 x double> %1, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
> @@ -24,14 +22,12 @@ define <4 x i64> @test_broadcast_2i64_4i
> ; X32-LABEL: test_broadcast_2i64_4i64:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovaps (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_2i64_4i64:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovaps (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> %1 = load <2 x i64>, <2 x i64> *%p
> %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
> @@ -42,14 +38,12 @@ define <8 x float> @test_broadcast_4f32_
> ; X32-LABEL: test_broadcast_4f32_8f32:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovaps (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_4f32_8f32:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovaps (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> %1 = load <4 x float>, <4 x float> *%p
> %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
> @@ -60,14 +54,12 @@ define <8 x i32> @test_broadcast_4i32_8i
> ; X32-LABEL: test_broadcast_4i32_8i32:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovaps (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_4i32_8i32:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovaps (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> %1 = load <4 x i32>, <4 x i32> *%p
> %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
> @@ -78,14 +70,12 @@ define <16 x i16> @test_broadcast_8i16_1
> ; X32-LABEL: test_broadcast_8i16_16i16:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovaps (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_8i16_16i16:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovaps (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> %1 = load <8 x i16>, <8 x i16> *%p
> %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
> @@ -96,14 +86,12 @@ define <32 x i8> @test_broadcast_16i8_32
> ; X32-LABEL: test_broadcast_16i8_32i8:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovaps (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_16i8_32i8:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovaps (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> %1 = load <16 x i8>, <16 x i8> *%p
> %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
>
> Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll Fri Jul 22 08:58:44 2016
> @@ -505,14 +505,12 @@ define <4 x i64> @test_mm256_broadcastsi
> ; X32-LABEL: test_mm256_broadcastsi128_si256_mem:
> ; X32: # BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovaps (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_mm256_broadcastsi128_si256_mem:
> ; X64: # BB#0:
> -; X64-NEXT: vmovaps (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: retq
> %a0 = load <2 x i64>, <2 x i64>* %p0
> %res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
>
> Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcasti128.ll Fri Jul 22 08:58:44 2016
> @@ -6,15 +6,13 @@ define <4 x double> @test_broadcast_2f64
> ; X32-LABEL: test_broadcast_2f64_4f64:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovapd (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: vaddpd LCPI0_0, %ymm0, %ymm0
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_2f64_4f64:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovapd (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
> ; X64-NEXT: retq
> %1 = load <2 x double>, <2 x double> *%p
> @@ -27,15 +25,13 @@ define <4 x i64> @test_broadcast_2i64_4i
> ; X32-LABEL: test_broadcast_2i64_4i64:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovdqa (%eax), %xmm0
> -; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: vpaddq LCPI1_0, %ymm0, %ymm0
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_2i64_4i64:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovdqa (%rdi), %xmm0
> -; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
> ; X64-NEXT: retq
> %1 = load <2 x i64>, <2 x i64> *%p
> @@ -48,15 +44,13 @@ define <8 x float> @test_broadcast_4f32_
> ; X32-LABEL: test_broadcast_4f32_8f32:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovaps (%eax), %xmm0
> -; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: vaddps LCPI2_0, %ymm0, %ymm0
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_4f32_8f32:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovaps (%rdi), %xmm0
> -; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
> ; X64-NEXT: retq
> %1 = load <4 x float>, <4 x float> *%p
> @@ -69,15 +63,13 @@ define <8 x i32> @test_broadcast_4i32_8i
> ; X32-LABEL: test_broadcast_4i32_8i32:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovdqa (%eax), %xmm0
> -; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: vpaddd LCPI3_0, %ymm0, %ymm0
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_4i32_8i32:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovdqa (%rdi), %xmm0
> -; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
> ; X64-NEXT: retq
> %1 = load <4 x i32>, <4 x i32> *%p
> @@ -90,15 +82,13 @@ define <16 x i16> @test_broadcast_8i16_1
> ; X32-LABEL: test_broadcast_8i16_16i16:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovdqa (%eax), %xmm0
> -; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: vpaddw LCPI4_0, %ymm0, %ymm0
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_8i16_16i16:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovdqa (%rdi), %xmm0
> -; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
> ; X64-NEXT: retq
> %1 = load <8 x i16>, <8 x i16> *%p
> @@ -111,15 +101,13 @@ define <32 x i8> @test_broadcast_16i8_32
> ; X32-LABEL: test_broadcast_16i8_32i8:
> ; X32: ## BB#0:
> ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
> -; X32-NEXT: vmovdqa (%eax), %xmm0
> -; X32-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
> +; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X32-NEXT: vpaddb LCPI5_0, %ymm0, %ymm0
> ; X32-NEXT: retl
> ;
> ; X64-LABEL: test_broadcast_16i8_32i8:
> ; X64: ## BB#0:
> -; X64-NEXT: vmovdqa (%rdi), %xmm0
> -; X64-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
> +; X64-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; X64-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
> ; X64-NEXT: retq
> %1 = load <16 x i8>, <16 x i8> *%p
>
> Modified: llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/avx512-vbroadcasti128.ll Fri Jul 22 08:58:44 2016
> @@ -10,22 +10,19 @@
> define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
> ; X64-AVX512VL-LABEL: test_broadcast_2f64_4f64:
> ; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovapd (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
> ; X64-AVX512VL-NEXT: retq
> ;
> ; X64-AVX512BWVL-LABEL: test_broadcast_2f64_4f64:
> ; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovapd (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
> ; X64-AVX512BWVL-NEXT: retq
> ;
> ; X64-AVX512DQVL-LABEL: test_broadcast_2f64_4f64:
> ; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovapd (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
> ; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
> ; X64-AVX512DQVL-NEXT: retq
> %1 = load <2 x double>, <2 x double> *%p
> @@ -37,22 +34,19 @@ define <4 x double> @test_broadcast_2f64
> define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
> ; X64-AVX512VL-LABEL: test_broadcast_2i64_4i64:
> ; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
> ; X64-AVX512VL-NEXT: retq
> ;
> ; X64-AVX512BWVL-LABEL: test_broadcast_2i64_4i64:
> ; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
> ; X64-AVX512BWVL-NEXT: retq
> ;
> ; X64-AVX512DQVL-LABEL: test_broadcast_2i64_4i64:
> ; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
> ; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %ymm0, %ymm0
> ; X64-AVX512DQVL-NEXT: retq
> %1 = load <2 x i64>, <2 x i64> *%p
> @@ -64,8 +58,7 @@ define <4 x i64> @test_broadcast_2i64_4i
> define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
> ; X64-AVX512-LABEL: test_broadcast_4f32_8f32:
> ; X64-AVX512: ## BB#0:
> -; X64-AVX512-NEXT: vmovaps (%rdi), %xmm0
> -; X64-AVX512-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; X64-AVX512-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
> ; X64-AVX512-NEXT: retq
> %1 = load <4 x float>, <4 x float> *%p
> @@ -77,8 +70,7 @@ define <8 x float> @test_broadcast_4f32_
> define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
> ; X64-AVX512-LABEL: test_broadcast_4i32_8i32:
> ; X64-AVX512: ## BB#0:
> -; X64-AVX512-NEXT: vmovdqa32 (%rdi), %xmm0
> -; X64-AVX512-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
> ; X64-AVX512-NEXT: retq
> %1 = load <4 x i32>, <4 x i32> *%p
> @@ -88,26 +80,11 @@ define <8 x i32> @test_broadcast_4i32_8i
> }
>
> define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
> -; X64-AVX512VL-LABEL: test_broadcast_8i16_16i16:
> -; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> -; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
> -; X64-AVX512VL-NEXT: retq
> -;
> -; X64-AVX512BWVL-LABEL: test_broadcast_8i16_16i16:
> -; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovdqu16 (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> -; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
> -; X64-AVX512BWVL-NEXT: retq
> -;
> -; X64-AVX512DQVL-LABEL: test_broadcast_8i16_16i16:
> -; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> -; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
> -; X64-AVX512DQVL-NEXT: retq
> +; X64-AVX512-LABEL: test_broadcast_8i16_16i16:
> +; X64-AVX512: ## BB#0:
> +; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> +; X64-AVX512-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
> +; X64-AVX512-NEXT: retq
> %1 = load <8 x i16>, <8 x i16> *%p
> %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
> %3 = add <16 x i16> %2, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>
> @@ -115,26 +92,11 @@ define <16 x i16> @test_broadcast_8i16_1
> }
>
> define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
> -; X64-AVX512VL-LABEL: test_broadcast_16i8_32i8:
> -; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> -; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
> -; X64-AVX512VL-NEXT: retq
> -;
> -; X64-AVX512BWVL-LABEL: test_broadcast_16i8_32i8:
> -; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovdqu8 (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> -; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
> -; X64-AVX512BWVL-NEXT: retq
> -;
> -; X64-AVX512DQVL-LABEL: test_broadcast_16i8_32i8:
> -; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> -; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
> -; X64-AVX512DQVL-NEXT: retq
> +; X64-AVX512-LABEL: test_broadcast_16i8_32i8:
> +; X64-AVX512: ## BB#0:
> +; X64-AVX512-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> +; X64-AVX512-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
> +; X64-AVX512-NEXT: retq
> %1 = load <16 x i8>, <16 x i8> *%p
> %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
> %3 = add <32 x i8> %2, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32>
> @@ -148,24 +110,21 @@ define <32 x i8> @test_broadcast_16i8_32
> define <8 x double> @test_broadcast_2f64_8f64(<2 x double> *%p) nounwind {
> ; X64-AVX512VL-LABEL: test_broadcast_2f64_8f64:
> ; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovapd (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512VL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512VL-NEXT: retq
> ;
> ; X64-AVX512BWVL-LABEL: test_broadcast_2f64_8f64:
> ; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovapd (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: retq
> ;
> ; X64-AVX512DQVL-LABEL: test_broadcast_2f64_8f64:
> ; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovapd (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512DQVL-NEXT: vbroadcastf64x2 (%rdi), %ymm0
> ; X64-AVX512DQVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512DQVL-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512DQVL-NEXT: retq
> @@ -178,24 +137,21 @@ define <8 x double> @test_broadcast_2f64
> define <8 x i64> @test_broadcast_2i64_8i64(<2 x i64> *%p) nounwind {
> ; X64-AVX512VL-LABEL: test_broadcast_2i64_8i64:
> ; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512VL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512VL-NEXT: retq
> ;
> ; X64-AVX512BWVL-LABEL: test_broadcast_2i64_8i64:
> ; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: retq
> ;
> ; X64-AVX512DQVL-LABEL: test_broadcast_2i64_8i64:
> ; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512DQVL-NEXT: vbroadcasti64x2 (%rdi), %ymm0
> ; X64-AVX512DQVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512DQVL-NEXT: vpaddq {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512DQVL-NEXT: retq
> @@ -208,24 +164,21 @@ define <8 x i64> @test_broadcast_2i64_8i
> define <16 x float> @test_broadcast_4f32_16f32(<4 x float> *%p) nounwind {
> ; X64-AVX512VL-LABEL: test_broadcast_4f32_16f32:
> ; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovaps (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; X64-AVX512VL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512VL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512VL-NEXT: retq
> ;
> ; X64-AVX512BWVL-LABEL: test_broadcast_4f32_16f32:
> ; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovaps (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512BWVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; X64-AVX512BWVL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: retq
> ;
> ; X64-AVX512DQVL-LABEL: test_broadcast_4f32_16f32:
> ; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovaps (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512DQVL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; X64-AVX512DQVL-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512DQVL-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512DQVL-NEXT: retq
> @@ -238,24 +191,21 @@ define <16 x float> @test_broadcast_4f32
> define <16 x i32> @test_broadcast_4i32_16i32(<4 x i32> *%p) nounwind {
> ; X64-AVX512VL-LABEL: test_broadcast_4i32_16i32:
> ; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovdqa32 (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512VL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512VL-NEXT: retq
> ;
> ; X64-AVX512BWVL-LABEL: test_broadcast_4i32_16i32:
> ; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovdqa32 (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: retq
> ;
> ; X64-AVX512DQVL-LABEL: test_broadcast_4i32_16i32:
> ; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovdqa32 (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512DQVL-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512DQVL-NEXT: vpaddd {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512DQVL-NEXT: retq
> @@ -268,24 +218,21 @@ define <16 x i32> @test_broadcast_4i32_1
> define <32 x i16> @test_broadcast_8i16_32i16(<8 x i16> *%p) nounwind {
> ; X64-AVX512VL-LABEL: test_broadcast_8i16_32i16:
> ; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
> +; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
> ; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
> ; X64-AVX512VL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
> ; X64-AVX512VL-NEXT: retq
> ;
> ; X64-AVX512BWVL-LABEL: test_broadcast_8i16_32i16:
> ; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovdqu16 (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: vpaddw {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: retq
> ;
> ; X64-AVX512DQVL-LABEL: test_broadcast_8i16_32i16:
> ; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
> +; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
> ; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm0
> ; X64-AVX512DQVL-NEXT: vpaddw {{.*}}(%rip), %ymm1, %ymm1
> ; X64-AVX512DQVL-NEXT: retq
> @@ -298,24 +245,21 @@ define <32 x i16> @test_broadcast_8i16_3
> define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind {
> ; X64-AVX512VL-LABEL: test_broadcast_16i8_64i8:
> ; X64-AVX512VL: ## BB#0:
> -; X64-AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
> +; X64-AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
> ; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
> ; X64-AVX512VL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
> ; X64-AVX512VL-NEXT: retq
> ;
> ; X64-AVX512BWVL-LABEL: test_broadcast_16i8_64i8:
> ; X64-AVX512BWVL: ## BB#0:
> -; X64-AVX512BWVL-NEXT: vmovdqu8 (%rdi), %xmm0
> -; X64-AVX512BWVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; X64-AVX512BWVL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; X64-AVX512BWVL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: vpaddb {{.*}}(%rip), %zmm0, %zmm0
> ; X64-AVX512BWVL-NEXT: retq
> ;
> ; X64-AVX512DQVL-LABEL: test_broadcast_16i8_64i8:
> ; X64-AVX512DQVL: ## BB#0:
> -; X64-AVX512DQVL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; X64-AVX512DQVL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1
> +; X64-AVX512DQVL-NEXT: vbroadcasti32x4 (%rdi), %ymm1
> ; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm0
> ; X64-AVX512DQVL-NEXT: vpaddb {{.*}}(%rip), %ymm1, %ymm1
> ; X64-AVX512DQVL-NEXT: retq
>
> Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=276416&r1=276415&r2=276416&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Fri Jul 22 08:58:44 2016
> @@ -1352,20 +1352,17 @@ define <4 x double> @splat_mem_v4f64_fro
> define <4 x i64> @splat128_mem_v4i64_from_v2i64(<2 x i64>* %ptr) {
> ; AVX1-LABEL: splat128_mem_v4i64_from_v2i64:
> ; AVX1: # BB#0:
> -; AVX1-NEXT: vmovaps (%rdi), %xmm0
> -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; AVX1-NEXT: retq
> ;
> ; AVX2-LABEL: splat128_mem_v4i64_from_v2i64:
> ; AVX2: # BB#0:
> -; AVX2-NEXT: vmovaps (%rdi), %xmm0
> -; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; AVX2-NEXT: retq
> ;
> ; AVX512VL-LABEL: splat128_mem_v4i64_from_v2i64:
> ; AVX512VL: # BB#0:
> -; AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
> -; AVX512VL-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm0
> +; AVX512VL-NEXT: vbroadcasti32x4 (%rdi), %ymm0
> ; AVX512VL-NEXT: retq
> %v = load <2 x i64>, <2 x i64>* %ptr
> %shuffle = shufflevector <2 x i64> %v, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
> @@ -1375,20 +1372,17 @@ define <4 x i64> @splat128_mem_v4i64_fro
> define <4 x double> @splat128_mem_v4f64_from_v2f64(<2 x double>* %ptr) {
> ; AVX1-LABEL: splat128_mem_v4f64_from_v2f64:
> ; AVX1: # BB#0:
> -; AVX1-NEXT: vmovaps (%rdi), %xmm0
> -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; AVX1-NEXT: retq
> ;
> ; AVX2-LABEL: splat128_mem_v4f64_from_v2f64:
> ; AVX2: # BB#0:
> -; AVX2-NEXT: vmovaps (%rdi), %xmm0
> -; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
> +; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
> ; AVX2-NEXT: retq
> ;
> ; AVX512VL-LABEL: splat128_mem_v4f64_from_v2f64:
> ; AVX512VL: # BB#0:
> -; AVX512VL-NEXT: vmovapd (%rdi), %xmm0
> -; AVX512VL-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm0
> +; AVX512VL-NEXT: vbroadcastf32x4 (%rdi), %ymm0
> ; AVX512VL-NEXT: retq
> %v = load <2 x double>, <2 x double>* %ptr
> %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at lists.llvm.org
> http://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits
More information about the llvm-commits
mailing list