[llvm] [X86][AVX] Match v4f64 blend from shuffle of scalar values. (PR #135753)

Leon Clark via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 15 22:50:44 PDT 2025


https://github.com/PeddleSpam updated https://github.com/llvm/llvm-project/pull/135753

From cc40158c2259c122dbb1aa0a3a26e3fde5808c25 Mon Sep 17 00:00:00 2001
From: Leon Clark <leoclark at amd.com>
Date: Tue, 15 Apr 2025 07:45:46 +0100
Subject: [PATCH 1/5] [X86][AVX] Match v4f64 blend from shuffle of scalar
 values.

Convert a BUILD_VECTOR of scalar values to a shuffle of shuffles that will lower to an AVX blend.
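
For illustration only (not part of the patch), a minimal IR sketch of the
equivalent shuffle-of-splats form for a v4f64 built as X,Y,Y,X; the function
and value names are hypothetical:

  define <4 x double> @xyyx_as_blend(double %x, double %y) {
    ; Place each scalar in lane 0, then splat it across the vector.
    %vx = insertelement <4 x double> poison, double %x, i64 0
    %vy = insertelement <4 x double> poison, double %y, i64 0
    %sx = shufflevector <4 x double> %vx, <4 x double> poison, <4 x i32> zeroinitializer
    %sy = shufflevector <4 x double> %vy, <4 x double> poison, <4 x i32> zeroinitializer
    ; Take lanes 0 and 3 from the X splat, lanes 1 and 2 from the Y splat.
    %r = shufflevector <4 x double> %sx, <4 x double> %sy, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
    ret <4 x double> %r
  }

With AVX this form selects to two vbroadcastsd loads plus a single vblendps,
as the updated test checks below show.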
---
 llvm/test/CodeGen/X86/shuffle-blendw.ll | 422 ++++++++++++++++++++++++
 1 file changed, 422 insertions(+)

diff --git a/llvm/test/CodeGen/X86/shuffle-blendw.ll b/llvm/test/CodeGen/X86/shuffle-blendw.ll
index 9f90657dc64d1..28af382ec3e07 100644
--- a/llvm/test/CodeGen/X86/shuffle-blendw.ll
+++ b/llvm/test/CodeGen/X86/shuffle-blendw.ll
@@ -263,3 +263,425 @@ define <8 x i16> @blendw_to_blendd_fail_16(<8 x i16> %x, <8 x i16> %y, <8 x i16>
   %shuffle = shufflevector <8 x i16> %x1, <8 x i16> %y, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 14, i32 15>
   ret <8 x i16> %shuffle
 }
+
+define <4 x double> @blend_broadcasts_v4f64(ptr %p0, ptr %p1)  {
+; X86-SSE41-LABEL: blend_broadcasts_v4f64:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT:    movaps (%ecx), %xmm2
+; X86-SSE41-NEXT:    movaps (%eax), %xmm1
+; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X86-SSE41-NEXT:    retl
+;
+; X64-SSE41-LABEL: blend_broadcasts_v4f64:
+; X64-SSE41:       # %bb.0:
+; X64-SSE41-NEXT:    movaps (%rdi), %xmm2
+; X64-SSE41-NEXT:    movaps (%rsi), %xmm1
+; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X64-SSE41-NEXT:    retq
+;
+; X86-AVX-LABEL: blend_broadcasts_v4f64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; X86-AVX-NEXT:    retl
+;
+; X64-AVX-LABEL: blend_broadcasts_v4f64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vbroadcastsd (%rdi), %ymm0
+; X64-AVX-NEXT:    vbroadcastsd (%rsi), %ymm1
+; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; X64-AVX-NEXT:    retq
+;
+; X86-AVX2-LABEL: blend_broadcasts_v4f64:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX2-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; X86-AVX2-NEXT:    retl
+;
+; X64-AVX2-LABEL: blend_broadcasts_v4f64:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
+; X64-AVX2-NEXT:    vbroadcastsd (%rsi), %ymm1
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; X64-AVX2-NEXT:    retq
+;
+; X86-AVX512-LABEL: blend_broadcasts_v4f64:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX512-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; X86-AVX512-NEXT:    retl
+;
+; X64-AVX512-LABEL: blend_broadcasts_v4f64:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vbroadcastsd (%rdi), %ymm0
+; X64-AVX512-NEXT:    vbroadcastsd (%rsi), %ymm1
+; X64-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; X64-AVX512-NEXT:    retq
+  %ld0 = load <4 x double>, ptr %p0, align 32
+  %ld1 = load <4 x double>, ptr %p1, align 32
+  %bcst0 = shufflevector <4 x double> %ld0, <4 x double> undef, <4 x i32> zeroinitializer
+  %bcst1 = shufflevector <4 x double> %ld1, <4 x double> undef, <4 x i32> zeroinitializer
+  %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+  ret <4 x double> %blend
+}
+
+define <4 x double> @blend_broadcasts_v2f64(ptr %p0, ptr %p1) {
+; X86-SSE41-LABEL: blend_broadcasts_v2f64:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT:    movaps (%ecx), %xmm2
+; X86-SSE41-NEXT:    movaps (%eax), %xmm1
+; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X86-SSE41-NEXT:    retl
+;
+; X64-SSE41-LABEL: blend_broadcasts_v2f64:
+; X64-SSE41:       # %bb.0:
+; X64-SSE41-NEXT:    movaps (%rdi), %xmm2
+; X64-SSE41-NEXT:    movaps (%rsi), %xmm1
+; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X64-SSE41-NEXT:    retq
+;
+; X86-AVX-LABEL: blend_broadcasts_v2f64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; X86-AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; X86-AVX-NEXT:    retl
+;
+; X64-AVX-LABEL: blend_broadcasts_v2f64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X64-AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; X64-AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; X64-AVX-NEXT:    retq
+;
+; X86-AVX2-LABEL: blend_broadcasts_v2f64:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; X86-AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; X86-AVX2-NEXT:    retl
+;
+; X64-AVX2-LABEL: blend_broadcasts_v2f64:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X64-AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; X64-AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; X64-AVX2-NEXT:    retq
+;
+; X86-AVX512-LABEL: blend_broadcasts_v2f64:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; X86-AVX512-NEXT:    vpmovsxbq {{.*#+}} ymm0 = [0,4,6,2]
+; X86-AVX512-NEXT:    vpermi2pd %ymm1, %ymm2, %ymm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-AVX512-LABEL: blend_broadcasts_v2f64:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X64-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; X64-AVX512-NEXT:    vpmovsxbq {{.*#+}} ymm0 = [0,4,6,2]
+; X64-AVX512-NEXT:    vpermi2pd %ymm1, %ymm2, %ymm0
+; X64-AVX512-NEXT:    retq
+  %ld0 = load <2 x double>, ptr %p0, align 32
+  %ld1 = load <2 x double>, ptr %p1, align 32
+  %blend = shufflevector <2 x double> %ld0, <2 x double> %ld1, <4 x i32> <i32 0, i32 2, i32 2, i32 0>
+  ret <4 x double> %blend
+}
+
+define <4 x double> @blend_broadcasts_v1f64(ptr %p0, ptr %p1) {
+; X86-SSE41-LABEL: blend_broadcasts_v1f64:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X86-SSE41-NEXT:    retl
+;
+; X64-SSE41-LABEL: blend_broadcasts_v1f64:
+; X64-SSE41:       # %bb.0:
+; X64-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X64-SSE41-NEXT:    retq
+;
+; X86-AVX-LABEL: blend_broadcasts_v1f64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX-NEXT:    retl
+;
+; X64-AVX-LABEL: blend_broadcasts_v1f64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX-NEXT:    retq
+;
+; X86-AVX2-LABEL: blend_broadcasts_v1f64:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX2-NEXT:    retl
+;
+; X64-AVX2-LABEL: blend_broadcasts_v1f64:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX2-NEXT:    retq
+;
+; X86-AVX512-LABEL: blend_broadcasts_v1f64:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-AVX512-LABEL: blend_broadcasts_v1f64:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512-NEXT:    retq
+  %ld0 = load <1 x double>, ptr %p0, align 32
+  %ld1 = load <1 x double>, ptr %p1, align 32
+  %blend = shufflevector <1 x double> %ld0, <1 x double> %ld1, <4 x i32> <i32 0, i32 1, i32 1, i32 0>
+  ret <4 x double> %blend
+}
+
+define <4 x double> @blend_broadcasts_v1f64_4x(ptr %p0, ptr %p1) {
+; X86-SSE41-LABEL: blend_broadcasts_v1f64_4x:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X86-SSE41-NEXT:    retl
+;
+; X64-SSE41-LABEL: blend_broadcasts_v1f64_4x:
+; X64-SSE41:       # %bb.0:
+; X64-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X64-SSE41-NEXT:    retq
+;
+; X86-AVX-LABEL: blend_broadcasts_v1f64_4x:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX-NEXT:    retl
+;
+; X64-AVX-LABEL: blend_broadcasts_v1f64_4x:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX-NEXT:    retq
+;
+; X86-AVX2-LABEL: blend_broadcasts_v1f64_4x:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX2-NEXT:    retl
+;
+; X64-AVX2-LABEL: blend_broadcasts_v1f64_4x:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX2-NEXT:    retq
+;
+; X86-AVX512-LABEL: blend_broadcasts_v1f64_4x:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-AVX512-LABEL: blend_broadcasts_v1f64_4x:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512-NEXT:    retq
+  %ld0 = load <1 x double>, ptr %p0, align 32
+  %ld1 = load <1 x double>, ptr %p1, align 32
+  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> undef, <4 x i32> zeroinitializer
+  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> undef, <4 x i32> zeroinitializer
+  %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+  ret <4 x double> %blend
+}
+
+define <4 x double> @blend_broadcasts_v1f64_2x(ptr %p0, ptr %p1) {
+; X86-SSE41-LABEL: blend_broadcasts_v1f64_2x:
+; X86-SSE41:       # %bb.0:
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X86-SSE41-NEXT:    retl
+;
+; X64-SSE41-LABEL: blend_broadcasts_v1f64_2x:
+; X64-SSE41:       # %bb.0:
+; X64-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X64-SSE41-NEXT:    retq
+;
+; X86-AVX-LABEL: blend_broadcasts_v1f64_2x:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX-NEXT:    retl
+;
+; X64-AVX-LABEL: blend_broadcasts_v1f64_2x:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX-NEXT:    retq
+;
+; X86-AVX2-LABEL: blend_broadcasts_v1f64_2x:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX2-NEXT:    retl
+;
+; X64-AVX2-LABEL: blend_broadcasts_v1f64_2x:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX2-NEXT:    retq
+;
+; X86-AVX512-LABEL: blend_broadcasts_v1f64_2x:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X86-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512-NEXT:    retl
+;
+; X64-AVX512-LABEL: blend_broadcasts_v1f64_2x:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
+; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512-NEXT:    retq
+  %ld0 = load <1 x double>, ptr %p0, align 32
+  %ld1 = load <1 x double>, ptr %p1, align 32
+  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> undef, <2 x i32> zeroinitializer
+  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> undef, <2 x i32> zeroinitializer
+  %blend = shufflevector <2 x double> %bcst0, <2 x double> %bcst1, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+  ret <4 x double> %blend
+}

From bad26819b45e751f7a93eee7a1697fbb0a773216 Mon Sep 17 00:00:00 2001
From: Leon Clark <leoclark at amd.com>
Date: Tue, 15 Apr 2025 07:59:42 +0100
Subject: [PATCH 2/5] Add lowering code and update tests.

---
 llvm/lib/Target/X86/X86ISelLowering.cpp |  33 ++++++
 llvm/test/CodeGen/X86/shuffle-blendw.ll | 144 +++++++++---------------
 2 files changed, 87 insertions(+), 90 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 84aaf86550842..382f089971537 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -9040,6 +9040,39 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
   MVT OpEltVT = Op.getOperand(0).getSimpleValueType();
   unsigned NumElems = Op.getNumOperands();
 
+  // Match BUILD_VECTOR of scalars that we can lower to X86ISD::BLENDI via
+  // shuffles.
+  //
+  //   v4f64 = BUILD_VECTOR X,Y,Y,X
+  //   >>>
+  //       t1: v4f64 = BUILD_VECTOR X,u,u,u
+  //     t3: v4f64 = vector_shuffle<0,u,u,0> t1, u
+  //       t2: v4f64 = BUILD_VECTOR Y,u,u,u
+  //     t4: v4f64 = vector_shuffle<u,0,0,u> t2, u
+  //   v4f64 = vector_shuffle<0,5,6,3> t3, t4
+  //
+  if (Subtarget.hasAVX() && VT == MVT::v4f64 && Op->getNumOperands() == 4u) {
+    auto Op0 = Op->getOperand(0u);
+    auto Op1 = Op->getOperand(1u);
+    auto Op2 = Op->getOperand(2u);
+    auto Op3 = Op->getOperand(3u);
+
+    // Match X,Y,Y,X inputs.
+    if (Op0 == Op3 && Op1 == Op2 && Op0 != Op1) {
+      auto PsnVal = DAG.getUNDEF(MVT::f64);
+
+      auto NewOp0 = DAG.getBuildVector(VT, dl, {Op0, PsnVal, PsnVal, PsnVal});
+      NewOp0 = DAG.getVectorShuffle(VT, dl, NewOp0, DAG.getUNDEF(VT),
+                                    {0, -1, -1, 0});
+
+      auto NewOp1 = DAG.getBuildVector(VT, dl, {Op1, PsnVal, PsnVal, PsnVal});
+      NewOp1 = DAG.getVectorShuffle(VT, dl, NewOp1, DAG.getUNDEF(VT),
+                                    {-1, 0, 0, -1});
+
+      return DAG.getVectorShuffle(VT, dl, NewOp0, NewOp1, {0, 5, 6, 3});
+    }
+  }
+
   // Generate vectors for predicate vectors.
   if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
     return LowerBUILD_VECTORvXi1(Op, dl, DAG, Subtarget);
diff --git a/llvm/test/CodeGen/X86/shuffle-blendw.ll b/llvm/test/CodeGen/X86/shuffle-blendw.ll
index 28af382ec3e07..a1af29550f64f 100644
--- a/llvm/test/CodeGen/X86/shuffle-blendw.ll
+++ b/llvm/test/CodeGen/X86/shuffle-blendw.ll
@@ -449,60 +449,48 @@ define <4 x double> @blend_broadcasts_v1f64(ptr %p0, ptr %p1) {
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-AVX-LABEL: blend_broadcasts_v1f64:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX-NEXT:    retq
 ;
 ; X86-AVX2-LABEL: blend_broadcasts_v1f64:
 ; X86-AVX2:       # %bb.0:
 ; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX2-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX2-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: blend_broadcasts_v1f64:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX2-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX2-NEXT:    retq
 ;
 ; X86-AVX512-LABEL: blend_broadcasts_v1f64:
 ; X86-AVX512:       # %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX512-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX512-NEXT:    retl
 ;
 ; X64-AVX512-LABEL: blend_broadcasts_v1f64:
 ; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX512-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX512-NEXT:    retq
   %ld0 = load <1 x double>, ptr %p0, align 32
   %ld1 = load <1 x double>, ptr %p1, align 32
@@ -535,60 +523,48 @@ define <4 x double> @blend_broadcasts_v1f64_4x(ptr %p0, ptr %p1) {
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-AVX-LABEL: blend_broadcasts_v1f64_4x:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX-NEXT:    retq
 ;
 ; X86-AVX2-LABEL: blend_broadcasts_v1f64_4x:
 ; X86-AVX2:       # %bb.0:
 ; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX2-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX2-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: blend_broadcasts_v1f64_4x:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX2-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX2-NEXT:    retq
 ;
 ; X86-AVX512-LABEL: blend_broadcasts_v1f64_4x:
 ; X86-AVX512:       # %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX512-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX512-NEXT:    retl
 ;
 ; X64-AVX512-LABEL: blend_broadcasts_v1f64_4x:
 ; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX512-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX512-NEXT:    retq
   %ld0 = load <1 x double>, ptr %p0, align 32
   %ld1 = load <1 x double>, ptr %p1, align 32
@@ -623,60 +599,48 @@ define <4 x double> @blend_broadcasts_v1f64_2x(ptr %p0, ptr %p1) {
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-AVX-LABEL: blend_broadcasts_v1f64_2x:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX-NEXT:    retq
 ;
 ; X86-AVX2-LABEL: blend_broadcasts_v1f64_2x:
 ; X86-AVX2:       # %bb.0:
 ; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX2-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX2-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX2-NEXT:    retl
 ;
 ; X64-AVX2-LABEL: blend_broadcasts_v1f64_2x:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX2-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX2-NEXT:    retq
 ;
 ; X86-AVX512-LABEL: blend_broadcasts_v1f64_2x:
 ; X86-AVX512:       # %bb.0:
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X86-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512-NEXT:    vbroadcastsd (%ecx), %ymm0
+; X86-AVX512-NEXT:    vbroadcastsd (%eax), %ymm1
+; X86-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X86-AVX512-NEXT:    retl
 ;
 ; X64-AVX512-LABEL: blend_broadcasts_v1f64_2x:
 ; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0]
-; X64-AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512-NEXT:    vbroadcastsd (%rsi), %ymm0
+; X64-AVX512-NEXT:    vbroadcastsd (%rdi), %ymm1
+; X64-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; X64-AVX512-NEXT:    retq
   %ld0 = load <1 x double>, ptr %p0, align 32
   %ld1 = load <1 x double>, ptr %p1, align 32

From 420d457ab340ea3368fa1c66ab1cd2fc40c3b3d8 Mon Sep 17 00:00:00 2001
From: Leon Clark <leoclark at amd.com>
Date: Tue, 15 Apr 2025 08:25:03 +0100
Subject: [PATCH 3/5] Replace undef in tests.

---
 llvm/test/CodeGen/X86/shuffle-blendw.ll | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/llvm/test/CodeGen/X86/shuffle-blendw.ll b/llvm/test/CodeGen/X86/shuffle-blendw.ll
index a1af29550f64f..20239362d2480 100644
--- a/llvm/test/CodeGen/X86/shuffle-blendw.ll
+++ b/llvm/test/CodeGen/X86/shuffle-blendw.ll
@@ -334,8 +334,8 @@ define <4 x double> @blend_broadcasts_v4f64(ptr %p0, ptr %p1)  {
 ; X64-AVX512-NEXT:    retq
   %ld0 = load <4 x double>, ptr %p0, align 32
   %ld1 = load <4 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <4 x double> %ld0, <4 x double> undef, <4 x i32> zeroinitializer
-  %bcst1 = shufflevector <4 x double> %ld1, <4 x double> undef, <4 x i32> zeroinitializer
+  %bcst0 = shufflevector <4 x double> %ld0, <4 x double> poison, <4 x i32> zeroinitializer
+  %bcst1 = shufflevector <4 x double> %ld1, <4 x double> poison, <4 x i32> zeroinitializer
   %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   ret <4 x double> %blend
 }
@@ -568,8 +568,8 @@ define <4 x double> @blend_broadcasts_v1f64_4x(ptr %p0, ptr %p1) {
 ; X64-AVX512-NEXT:    retq
   %ld0 = load <1 x double>, ptr %p0, align 32
   %ld1 = load <1 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> undef, <4 x i32> zeroinitializer
-  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> undef, <4 x i32> zeroinitializer
+  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> poison, <4 x i32> zeroinitializer
+  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> poison, <4 x i32> zeroinitializer
   %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   ret <4 x double> %blend
 }
@@ -644,8 +644,8 @@ define <4 x double> @blend_broadcasts_v1f64_2x(ptr %p0, ptr %p1) {
 ; X64-AVX512-NEXT:    retq
   %ld0 = load <1 x double>, ptr %p0, align 32
   %ld1 = load <1 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> undef, <2 x i32> zeroinitializer
-  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> undef, <2 x i32> zeroinitializer
+  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> poison, <2 x i32> zeroinitializer
+  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> poison, <2 x i32> zeroinitializer
   %blend = shufflevector <2 x double> %bcst0, <2 x double> %bcst1, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
   ret <4 x double> %blend
 }

From 301bc144777c05c0e5524e6eb696124bb827bf5a Mon Sep 17 00:00:00 2001
From: Leon Clark <leoclark at amd.com>
Date: Wed, 16 Apr 2025 06:43:27 +0100
Subject: [PATCH 4/5] Address comments.

---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  62 ++-
 llvm/test/CodeGen/X86/shuffle-blendw.ll       | 386 ------------------
 .../test/CodeGen/X86/vector-shuffle-256-v4.ll |  81 ++++
 3 files changed, 110 insertions(+), 419 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 382f089971537..da958271aab0a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -8690,6 +8690,33 @@ static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op, const SDLoc &DL,
   return LowerShift(Res, Subtarget, DAG);
 }
 
+/// Attempt to lower a BUILD_VECTOR of scalar values to a shuffle of splats
+/// representing a blend.
+static SDValue lowerBuildVectorAsBlend(BuildVectorSDNode *BVOp, SDLoc const &DL,
+                                       X86Subtarget const &Subtarget,
+                                       SelectionDAG &DAG) {
+  if (!Subtarget.hasAVX())
+    return {};
+
+  auto VT = BVOp->getSimpleValueType(0u);
+
+  if (VT == MVT::v4f64 && BVOp->getNumOperands() == 4u) {
+    SDValue Op0 = BVOp->getOperand(0u);
+    SDValue Op1 = BVOp->getOperand(1u);
+    SDValue Op2 = BVOp->getOperand(2u);
+    SDValue Op3 = BVOp->getOperand(3u);
+
+    // Match X,Y,Y,X inputs.
+    if (Op0 == Op3 && Op1 == Op2 && Op0 != Op1) {
+      auto NewOp0 = DAG.getSplatBuildVector(VT, DL, Op0);
+      auto NewOp1 = DAG.getSplatBuildVector(VT, DL, Op1);
+      return DAG.getVectorShuffle(VT, DL, NewOp0, NewOp1, {0, 5, 6, 3});
+    }
+  }
+
+  return {};
+}
+
 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
 /// functionality to do this, so it's all zeros, all ones, or some derivation
 /// that is cheap to calculate.
@@ -9040,39 +9067,6 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
   MVT OpEltVT = Op.getOperand(0).getSimpleValueType();
   unsigned NumElems = Op.getNumOperands();
 
-  // Match BUILD_VECTOR of scalars that we can lower to X86ISD::BLENDI via
-  // shuffles.
-  //
-  //   v4f64 = BUILD_VECTOR X,Y,Y,X
-  //   >>>
-  //       t1: v4f64 = BUILD_VECTOR X,u,u,u
-  //     t3: v4f64 = vector_shuffle<0,u,u,0> t1, u
-  //       t2: v4f64 = BUILD_VECTOR Y,u,u,u
-  //     t4: v4f64 = vector_shuffle<u,0,0,u> t2, u
-  //   v4f64 = vector_shuffle<0,5,6,3> t3, t4
-  //
-  if (Subtarget.hasAVX() && VT == MVT::v4f64 && Op->getNumOperands() == 4u) {
-    auto Op0 = Op->getOperand(0u);
-    auto Op1 = Op->getOperand(1u);
-    auto Op2 = Op->getOperand(2u);
-    auto Op3 = Op->getOperand(3u);
-
-    // Match X,Y,Y,X inputs.
-    if (Op0 == Op3 && Op1 == Op2 && Op0 != Op1) {
-      auto PsnVal = DAG.getUNDEF(MVT::f64);
-
-      auto NewOp0 = DAG.getBuildVector(VT, dl, {Op0, PsnVal, PsnVal, PsnVal});
-      NewOp0 = DAG.getVectorShuffle(VT, dl, NewOp0, DAG.getUNDEF(VT),
-                                    {0, -1, -1, 0});
-
-      auto NewOp1 = DAG.getBuildVector(VT, dl, {Op1, PsnVal, PsnVal, PsnVal});
-      NewOp1 = DAG.getVectorShuffle(VT, dl, NewOp1, DAG.getUNDEF(VT),
-                                    {-1, 0, 0, -1});
-
-      return DAG.getVectorShuffle(VT, dl, NewOp0, NewOp1, {0, 5, 6, 3});
-    }
-  }
-
   // Generate vectors for predicate vectors.
   if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
     return LowerBUILD_VECTORvXi1(Op, dl, DAG, Subtarget);
@@ -9185,6 +9179,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
     return Broadcast;
   if (SDValue BitOp = lowerBuildVectorToBitOp(BV, dl, Subtarget, DAG))
     return BitOp;
+  if (SDValue Blend = lowerBuildVectorAsBlend(BV, dl, Subtarget, DAG))
+    return Blend;
 
   unsigned NumZero = ZeroMask.popcount();
   unsigned NumNonZero = NonZeroMask.popcount();
diff --git a/llvm/test/CodeGen/X86/shuffle-blendw.ll b/llvm/test/CodeGen/X86/shuffle-blendw.ll
index 20239362d2480..9f90657dc64d1 100644
--- a/llvm/test/CodeGen/X86/shuffle-blendw.ll
+++ b/llvm/test/CodeGen/X86/shuffle-blendw.ll
@@ -263,389 +263,3 @@ define <8 x i16> @blendw_to_blendd_fail_16(<8 x i16> %x, <8 x i16> %y, <8 x i16>
   %shuffle = shufflevector <8 x i16> %x1, <8 x i16> %y, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 14, i32 15>
   ret <8 x i16> %shuffle
 }
-
-define <4 x double> @blend_broadcasts_v4f64(ptr %p0, ptr %p1)  {
-; X86-SSE41-LABEL: blend_broadcasts_v4f64:
-; X86-SSE41:       # %bb.0:
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE41-NEXT:    movaps (%ecx), %xmm2
-; X86-SSE41-NEXT:    movaps (%eax), %xmm1
-; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X86-SSE41-NEXT:    retl
-;
-; X64-SSE41-LABEL: blend_broadcasts_v4f64:
-; X64-SSE41:       # %bb.0:
-; X64-SSE41-NEXT:    movaps (%rdi), %xmm2
-; X64-SSE41-NEXT:    movaps (%rsi), %xmm1
-; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X64-SSE41-NEXT:    retq
-;
-; X86-AVX-LABEL: blend_broadcasts_v4f64:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
-; X86-AVX-NEXT:    retl
-;
-; X64-AVX-LABEL: blend_broadcasts_v4f64:
-; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vbroadcastsd (%rdi), %ymm0
-; X64-AVX-NEXT:    vbroadcastsd (%rsi), %ymm1
-; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
-; X64-AVX-NEXT:    retq
-;
-; X86-AVX2-LABEL: blend_broadcasts_v4f64:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX2-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
-; X86-AVX2-NEXT:    retl
-;
-; X64-AVX2-LABEL: blend_broadcasts_v4f64:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
-; X64-AVX2-NEXT:    vbroadcastsd (%rsi), %ymm1
-; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
-; X64-AVX2-NEXT:    retq
-;
-; X86-AVX512-LABEL: blend_broadcasts_v4f64:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX512-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
-; X86-AVX512-NEXT:    retl
-;
-; X64-AVX512-LABEL: blend_broadcasts_v4f64:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vbroadcastsd (%rdi), %ymm0
-; X64-AVX512-NEXT:    vbroadcastsd (%rsi), %ymm1
-; X64-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
-; X64-AVX512-NEXT:    retq
-  %ld0 = load <4 x double>, ptr %p0, align 32
-  %ld1 = load <4 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <4 x double> %ld0, <4 x double> poison, <4 x i32> zeroinitializer
-  %bcst1 = shufflevector <4 x double> %ld1, <4 x double> poison, <4 x i32> zeroinitializer
-  %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
-  ret <4 x double> %blend
-}
-
-define <4 x double> @blend_broadcasts_v2f64(ptr %p0, ptr %p1) {
-; X86-SSE41-LABEL: blend_broadcasts_v2f64:
-; X86-SSE41:       # %bb.0:
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE41-NEXT:    movaps (%ecx), %xmm2
-; X86-SSE41-NEXT:    movaps (%eax), %xmm1
-; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X86-SSE41-NEXT:    retl
-;
-; X64-SSE41-LABEL: blend_broadcasts_v2f64:
-; X64-SSE41:       # %bb.0:
-; X64-SSE41-NEXT:    movaps (%rdi), %xmm2
-; X64-SSE41-NEXT:    movaps (%rsi), %xmm1
-; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X64-SSE41-NEXT:    retq
-;
-; X86-AVX-LABEL: blend_broadcasts_v2f64:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X86-AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X86-AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; X86-AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
-; X86-AVX-NEXT:    retl
-;
-; X64-AVX-LABEL: blend_broadcasts_v2f64:
-; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X64-AVX-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X64-AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; X64-AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
-; X64-AVX-NEXT:    retq
-;
-; X86-AVX2-LABEL: blend_broadcasts_v2f64:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X86-AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; X86-AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
-; X86-AVX2-NEXT:    retl
-;
-; X64-AVX2-LABEL: blend_broadcasts_v2f64:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X64-AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; X64-AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
-; X64-AVX2-NEXT:    retq
-;
-; X86-AVX512-LABEL: blend_broadcasts_v2f64:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X86-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
-; X86-AVX512-NEXT:    vpmovsxbq {{.*#+}} ymm0 = [0,4,6,2]
-; X86-AVX512-NEXT:    vpermi2pd %ymm1, %ymm2, %ymm0
-; X86-AVX512-NEXT:    retl
-;
-; X64-AVX512-LABEL: blend_broadcasts_v2f64:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X64-AVX512-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
-; X64-AVX512-NEXT:    vpmovsxbq {{.*#+}} ymm0 = [0,4,6,2]
-; X64-AVX512-NEXT:    vpermi2pd %ymm1, %ymm2, %ymm0
-; X64-AVX512-NEXT:    retq
-  %ld0 = load <2 x double>, ptr %p0, align 32
-  %ld1 = load <2 x double>, ptr %p1, align 32
-  %blend = shufflevector <2 x double> %ld0, <2 x double> %ld1, <4 x i32> <i32 0, i32 2, i32 2, i32 0>
-  ret <4 x double> %blend
-}
-
-define <4 x double> @blend_broadcasts_v1f64(ptr %p0, ptr %p1) {
-; X86-SSE41-LABEL: blend_broadcasts_v1f64:
-; X86-SSE41:       # %bb.0:
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X86-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X86-SSE41-NEXT:    retl
-;
-; X64-SSE41-LABEL: blend_broadcasts_v1f64:
-; X64-SSE41:       # %bb.0:
-; X64-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X64-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X64-SSE41-NEXT:    retq
-;
-; X86-AVX-LABEL: blend_broadcasts_v1f64:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX-NEXT:    retl
-;
-; X64-AVX-LABEL: blend_broadcasts_v1f64:
-; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX-NEXT:    retq
-;
-; X86-AVX2-LABEL: blend_broadcasts_v1f64:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX2-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX2-NEXT:    retl
-;
-; X64-AVX2-LABEL: blend_broadcasts_v1f64:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX2-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX2-NEXT:    retq
-;
-; X86-AVX512-LABEL: blend_broadcasts_v1f64:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX512-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX512-NEXT:    retl
-;
-; X64-AVX512-LABEL: blend_broadcasts_v1f64:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX512-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX512-NEXT:    retq
-  %ld0 = load <1 x double>, ptr %p0, align 32
-  %ld1 = load <1 x double>, ptr %p1, align 32
-  %blend = shufflevector <1 x double> %ld0, <1 x double> %ld1, <4 x i32> <i32 0, i32 1, i32 1, i32 0>
-  ret <4 x double> %blend
-}
-
-define <4 x double> @blend_broadcasts_v1f64_4x(ptr %p0, ptr %p1) {
-; X86-SSE41-LABEL: blend_broadcasts_v1f64_4x:
-; X86-SSE41:       # %bb.0:
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X86-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X86-SSE41-NEXT:    retl
-;
-; X64-SSE41-LABEL: blend_broadcasts_v1f64_4x:
-; X64-SSE41:       # %bb.0:
-; X64-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X64-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X64-SSE41-NEXT:    retq
-;
-; X86-AVX-LABEL: blend_broadcasts_v1f64_4x:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX-NEXT:    retl
-;
-; X64-AVX-LABEL: blend_broadcasts_v1f64_4x:
-; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX-NEXT:    retq
-;
-; X86-AVX2-LABEL: blend_broadcasts_v1f64_4x:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX2-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX2-NEXT:    retl
-;
-; X64-AVX2-LABEL: blend_broadcasts_v1f64_4x:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX2-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX2-NEXT:    retq
-;
-; X86-AVX512-LABEL: blend_broadcasts_v1f64_4x:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX512-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX512-NEXT:    retl
-;
-; X64-AVX512-LABEL: blend_broadcasts_v1f64_4x:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX512-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX512-NEXT:    retq
-  %ld0 = load <1 x double>, ptr %p0, align 32
-  %ld1 = load <1 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> poison, <4 x i32> zeroinitializer
-  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> poison, <4 x i32> zeroinitializer
-  %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
-  ret <4 x double> %blend
-}
-
-define <4 x double> @blend_broadcasts_v1f64_2x(ptr %p0, ptr %p1) {
-; X86-SSE41-LABEL: blend_broadcasts_v1f64_2x:
-; X86-SSE41:       # %bb.0:
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X86-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X86-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X86-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X86-SSE41-NEXT:    retl
-;
-; X64-SSE41-LABEL: blend_broadcasts_v1f64_2x:
-; X64-SSE41:       # %bb.0:
-; X64-SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X64-SSE41-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X64-SSE41-NEXT:    movaps %xmm2, %xmm0
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-SSE41-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X64-SSE41-NEXT:    retq
-;
-; X86-AVX-LABEL: blend_broadcasts_v1f64_2x:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX-NEXT:    retl
-;
-; X64-AVX-LABEL: blend_broadcasts_v1f64_2x:
-; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX-NEXT:    retq
-;
-; X86-AVX2-LABEL: blend_broadcasts_v1f64_2x:
-; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX2-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX2-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX2-NEXT:    retl
-;
-; X64-AVX2-LABEL: blend_broadcasts_v1f64_2x:
-; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX2-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX2-NEXT:    retq
-;
-; X86-AVX512-LABEL: blend_broadcasts_v1f64_2x:
-; X86-AVX512:       # %bb.0:
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512-NEXT:    vbroadcastsd (%ecx), %ymm0
-; X86-AVX512-NEXT:    vbroadcastsd (%eax), %ymm1
-; X86-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X86-AVX512-NEXT:    retl
-;
-; X64-AVX512-LABEL: blend_broadcasts_v1f64_2x:
-; X64-AVX512:       # %bb.0:
-; X64-AVX512-NEXT:    vbroadcastsd (%rsi), %ymm0
-; X64-AVX512-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-AVX512-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
-; X64-AVX512-NEXT:    retq
-  %ld0 = load <1 x double>, ptr %p0, align 32
-  %ld1 = load <1 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> poison, <2 x i32> zeroinitializer
-  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> poison, <2 x i32> zeroinitializer
-  %blend = shufflevector <2 x double> %bcst0, <2 x double> %bcst1, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
-  ret <4 x double> %blend
-}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index d6208aca3b2b7..aef56e800836d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -2360,6 +2360,87 @@ define <4 x double> @unpckh_v4f64(<4 x double> %x, <4 x double> %y) {
   ret <4 x double> %unpckh
 }
 
+define <4 x double> @blend_broadcasts_v4f64(ptr %p0, ptr %p1)  {
+; ALL-LABEL: blend_broadcasts_v4f64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vbroadcastsd (%rdi), %ymm0
+; ALL-NEXT:    vbroadcastsd (%rsi), %ymm1
+; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
+; ALL-NEXT:    retq
+  %ld0 = load <4 x double>, ptr %p0, align 32
+  %ld1 = load <4 x double>, ptr %p1, align 32
+  %bcst0 = shufflevector <4 x double> %ld0, <4 x double> undef, <4 x i32> zeroinitializer
+  %bcst1 = shufflevector <4 x double> %ld1, <4 x double> undef, <4 x i32> zeroinitializer
+  %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+  ret <4 x double> %blend
+}
+
+define <4 x double> @blend_broadcasts_v2f64(ptr %p0, ptr %p1) {
+; AVX1OR2-LABEL: blend_broadcasts_v2f64:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; AVX1OR2-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX1OR2-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1OR2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; AVX1OR2-NEXT:    retq
+;
+; AVX512VL-LABEL: blend_broadcasts_v2f64:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpmovsxbq {{.*#+}} ymm0 = [0,4,6,2]
+; AVX512VL-NEXT:    vpermi2pd %ymm1, %ymm2, %ymm0
+; AVX512VL-NEXT:    retq
+  %ld0 = load <2 x double>, ptr %p0, align 32
+  %ld1 = load <2 x double>, ptr %p1, align 32
+  %blend = shufflevector <2 x double> %ld0, <2 x double> %ld1, <4 x i32> <i32 0, i32 2, i32 2, i32 0>
+  ret <4 x double> %blend
+}
+
+define <4 x double> @blend_broadcasts_v1f64(ptr %p0, ptr %p1) {
+; ALL-LABEL: blend_broadcasts_v1f64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vbroadcastsd (%rsi), %ymm0
+; ALL-NEXT:    vbroadcastsd (%rdi), %ymm1
+; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; ALL-NEXT:    retq
+  %ld0 = load <1 x double>, ptr %p0, align 32
+  %ld1 = load <1 x double>, ptr %p1, align 32
+  %blend = shufflevector <1 x double> %ld0, <1 x double> %ld1, <4 x i32> <i32 0, i32 1, i32 1, i32 0>
+  ret <4 x double> %blend
+}
+
+define <4 x double> @blend_broadcasts_v1f64_4x(ptr %p0, ptr %p1) {
+; ALL-LABEL: blend_broadcasts_v1f64_4x:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vbroadcastsd (%rsi), %ymm0
+; ALL-NEXT:    vbroadcastsd (%rdi), %ymm1
+; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; ALL-NEXT:    retq
+  %ld0 = load <1 x double>, ptr %p0, align 32
+  %ld1 = load <1 x double>, ptr %p1, align 32
+  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> undef, <4 x i32> zeroinitializer
+  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> undef, <4 x i32> zeroinitializer
+  %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+  ret <4 x double> %blend
+}
+
+define <4 x double> @blend_broadcasts_v1f64_2x(ptr %p0, ptr %p1) {
+; ALL-LABEL: blend_broadcasts_v1f64_2x:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vbroadcastsd (%rsi), %ymm0
+; ALL-NEXT:    vbroadcastsd (%rdi), %ymm1
+; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
+; ALL-NEXT:    retq
+  %ld0 = load <1 x double>, ptr %p0, align 32
+  %ld1 = load <1 x double>, ptr %p1, align 32
+  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> undef, <2 x i32> zeroinitializer
+  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> undef, <2 x i32> zeroinitializer
+  %blend = shufflevector <2 x double> %bcst0, <2 x double> %bcst1, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
+  ret <4 x double> %blend
+}
+
 !llvm.module.flags = !{!0}
 !0 = !{i32 1, !"ProfileSummary", !1}
 !1 = !{!2, !3, !4, !5, !6, !7, !8, !9}

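Several of the narrow-input tests above rely on shufflevector producing a result wider than its operands: the two inputs are concatenated and the mask length determines the result width. A minimal standalone sketch of that form, using hypothetical values %a and %b (not taken from the patch):

  ; Indices 0-3 address the concatenation %a[0], %a[1], %b[0], %b[1];
  ; the 4-element mask yields a <4 x double> from two <2 x double> inputs.
  %wide = shufflevector <2 x double> %a, <2 x double> %b, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
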
>From 0ca84cb1138bc249be2371e138b63d6893ee9fc7 Mon Sep 17 00:00:00 2001
From: Leon Clark <leoclark at amd.com>
Date: Wed, 16 Apr 2025 06:50:20 +0100
Subject: [PATCH 5/5] Remove undef.

---
 llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
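
The change below is mechanical: the splat shuffles never read their second operand, since every mask index selects a lane of the first, so the unused operand is spelled poison rather than undef, in line with current IR conventions. A minimal sketch of the splat form, using a hypothetical value %v:

  ; Every mask lane is 0, i.e. taken from %v; the second operand is never
  ; selected, so poison is the canonical placeholder for it.
  %splat = shufflevector <4 x double> %v, <4 x double> poison, <4 x i32> zeroinitializer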

diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index aef56e800836d..d43aa25e361da 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -2369,8 +2369,8 @@ define <4 x double> @blend_broadcasts_v4f64(ptr %p0, ptr %p1)  {
 ; ALL-NEXT:    retq
   %ld0 = load <4 x double>, ptr %p0, align 32
   %ld1 = load <4 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <4 x double> %ld0, <4 x double> undef, <4 x i32> zeroinitializer
-  %bcst1 = shufflevector <4 x double> %ld1, <4 x double> undef, <4 x i32> zeroinitializer
+  %bcst0 = shufflevector <4 x double> %ld0, <4 x double> poison, <4 x i32> zeroinitializer
+  %bcst1 = shufflevector <4 x double> %ld1, <4 x double> poison, <4 x i32> zeroinitializer
   %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   ret <4 x double> %blend
 }
@@ -2420,8 +2420,8 @@ define <4 x double> @blend_broadcasts_v1f64_4x(ptr %p0, ptr %p1) {
 ; ALL-NEXT:    retq
   %ld0 = load <1 x double>, ptr %p0, align 32
   %ld1 = load <1 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> undef, <4 x i32> zeroinitializer
-  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> undef, <4 x i32> zeroinitializer
+  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> poison, <4 x i32> zeroinitializer
+  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> poison, <4 x i32> zeroinitializer
   %blend = shufflevector <4 x double> %bcst0, <4 x double> %bcst1, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
   ret <4 x double> %blend
 }
@@ -2435,8 +2435,8 @@ define <4 x double> @blend_broadcasts_v1f64_2x(ptr %p0, ptr %p1) {
 ; ALL-NEXT:    retq
   %ld0 = load <1 x double>, ptr %p0, align 32
   %ld1 = load <1 x double>, ptr %p1, align 32
-  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> undef, <2 x i32> zeroinitializer
-  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> undef, <2 x i32> zeroinitializer
+  %bcst0 = shufflevector <1 x double> %ld0, <1 x double> poison, <2 x i32> zeroinitializer
+  %bcst1 = shufflevector <1 x double> %ld1, <1 x double> poison, <2 x i32> zeroinitializer
   %blend = shufflevector <2 x double> %bcst0, <2 x double> %bcst1, <4 x i32> <i32 0, i32 2, i32 3, i32 1>
   ret <4 x double> %blend
 }


