[llvm] e1ef679 - [X86] Add test coverage showing failure to load/binop combine adjacent v2f32 float ops
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 2 05:06:24 PST 2023
Author: Simon Pilgrim
Date: 2023-02-02T13:06:00Z
New Revision: e1ef6794b6f1de2e1b12627db24db26212c65969
URL: https://github.com/llvm/llvm-project/commit/e1ef6794b6f1de2e1b12627db24db26212c65969
DIFF: https://github.com/llvm/llvm-project/commit/e1ef6794b6f1de2e1b12627db24db26212c65969.diff
LOG: [X86] Add test coverage showing failure to load/binop combine adjacent v2f32 float ops
Pulled out of Issue #60441 - we really need that handling in the middle-end, but there are some obvious DAG cleanups we can try as well.
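For context, the missed fold in the simplest case below (widen_fadd_v2f32_v4f32) is that the two adjacent <2 x float> load/fadd/store chains at byte offsets 0 and 8 are together equivalent to a single <4 x float> operation. A hand-written sketch of the output we would ideally see after such a combine (value names here are illustrative, not taken from the tests):

  %va = load <4 x float>, ptr %a0, align 4
  %vb = load <4 x float>, ptr %b0, align 4
  %vc = fadd <4 x float> %va, %vb
  store <4 x float> %vc, ptr %c0, align 4

As the CHECK lines below show, neither the SSE nor the AVX lowering currently manages this; each pair is instead lowered as separate movsd loads and a binop on a half-filled xmm register.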
Added:
llvm/test/CodeGen/X86/widen_fadd.ll
llvm/test/CodeGen/X86/widen_fdiv.ll
llvm/test/CodeGen/X86/widen_fmul.ll
llvm/test/CodeGen/X86/widen_fsub.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
new file mode 100644
index 000000000000..68f2ed436804
--- /dev/null
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+
+define void @widen_fadd_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fadd_v2f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: addps %xmm0, %xmm2
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: movlps %xmm2, (%rdx)
+; SSE-NEXT: movlps %xmm0, 8(%rdx)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: widen_fadd_v2f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vaddps %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vmovups %xmm0, (%rdx)
+; AVX-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %vc0 = fadd <2 x float> %va0, %vb0
+ %vc2 = fadd <2 x float> %va2, %vb2
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ ret void
+}
+
+define void @widen_fadd_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fadd_v2f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: addps %xmm0, %xmm4
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: addps %xmm2, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: addps %xmm3, %xmm2
+; SSE-NEXT: movlps %xmm4, (%rdx)
+; SSE-NEXT: movlps %xmm0, 8(%rdx)
+; SSE-NEXT: movlps %xmm1, 16(%rdx)
+; SSE-NEXT: movlps %xmm2, 24(%rdx)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fadd_v2f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fadd_v2f32_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512F-NEXT: vmovups %ymm0, (%rdx)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fadd_v2f32_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512VL-NEXT: vmovups %ymm0, (%rdx)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %a4 = getelementptr inbounds i8, ptr %a0, i64 16
+ %b4 = getelementptr inbounds i8, ptr %b0, i64 16
+ %c4 = getelementptr inbounds i8, ptr %c0, i64 16
+ %a6 = getelementptr inbounds i8, ptr %a0, i64 24
+ %b6 = getelementptr inbounds i8, ptr %b0, i64 24
+ %c6 = getelementptr inbounds i8, ptr %c0, i64 24
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %va4 = load <2 x float>, ptr %a4, align 4
+ %vb4 = load <2 x float>, ptr %b4, align 4
+ %va6 = load <2 x float>, ptr %a6, align 4
+ %vb6 = load <2 x float>, ptr %b6, align 4
+ %vc0 = fadd <2 x float> %va0, %vb0
+ %vc2 = fadd <2 x float> %va2, %vb2
+ %vc4 = fadd <2 x float> %va4, %vb4
+ %vc6 = fadd <2 x float> %va6, %vb6
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ store <2 x float> %vc4, ptr %c4, align 4
+ store <2 x float> %vc6, ptr %c6, align 4
+ ret void
+}
+
+define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fadd_v2f32_v16f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: addps %xmm0, %xmm4
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: addps %xmm2, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: addps %xmm3, %xmm2
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm5 = mem[0],zero
+; SSE-NEXT: addps %xmm3, %xmm5
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
+; SSE-NEXT: addps %xmm3, %xmm6
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm7 = mem[0],zero
+; SSE-NEXT: addps %xmm3, %xmm7
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm8 = mem[0],zero
+; SSE-NEXT: addps %xmm3, %xmm8
+; SSE-NEXT: movlps %xmm4, (%rdx)
+; SSE-NEXT: movlps %xmm0, 8(%rdx)
+; SSE-NEXT: movlps %xmm1, 16(%rdx)
+; SSE-NEXT: movlps %xmm2, 24(%rdx)
+; SSE-NEXT: movlps %xmm5, 32(%rdx)
+; SSE-NEXT: movlps %xmm6, 40(%rdx)
+; SSE-NEXT: movlps %xmm7, 48(%rdx)
+; SSE-NEXT: movlps %xmm8, 56(%rdx)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fadd_v2f32_v16f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm5, %xmm4, %xmm4
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm6, %xmm5, %xmm5
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm7, %xmm6, %xmm6
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX1OR2-NEXT: vaddps %xmm7, %xmm8, %xmm7
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1OR2-NEXT: vmovups %ymm0, 32(%rdx)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fadd_v2f32_v16f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm5, %xmm4, %xmm4
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm6, %xmm5, %xmm5
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm7, %xmm6, %xmm6
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX512F-NEXT: vaddps %xmm7, %xmm8, %xmm7
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
+; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT: vpermt2pd %zmm6, %zmm5, %zmm4
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpermt2pd %zmm2, %zmm5, %zmm0
+; AVX512F-NEXT: vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT: vmovupd %zmm0, (%rdx)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fadd_v2f32_v16f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm5, %xmm4, %xmm4
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm6, %xmm5, %xmm5
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm7, %xmm6, %xmm6
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX512VL-NEXT: vaddps %xmm7, %xmm8, %xmm7
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
+; AVX512VL-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512VL-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512VL-NEXT: vpermi2pd %zmm6, %zmm4, %zmm5
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6]
+; AVX512VL-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1
+; AVX512VL-NEXT: vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512VL-NEXT: vmovupd %zmm0, (%rdx)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %a4 = getelementptr inbounds i8, ptr %a0, i64 16
+ %b4 = getelementptr inbounds i8, ptr %b0, i64 16
+ %c4 = getelementptr inbounds i8, ptr %c0, i64 16
+ %a6 = getelementptr inbounds i8, ptr %a0, i64 24
+ %b6 = getelementptr inbounds i8, ptr %b0, i64 24
+ %c6 = getelementptr inbounds i8, ptr %c0, i64 24
+ %a8 = getelementptr inbounds i8, ptr %a0, i64 32
+ %b8 = getelementptr inbounds i8, ptr %b0, i64 32
+ %c8 = getelementptr inbounds i8, ptr %c0, i64 32
+ %a10 = getelementptr inbounds i8, ptr %a0, i64 40
+ %b10 = getelementptr inbounds i8, ptr %b0, i64 40
+ %c10 = getelementptr inbounds i8, ptr %c0, i64 40
+ %a12 = getelementptr inbounds i8, ptr %a0, i64 48
+ %b12 = getelementptr inbounds i8, ptr %b0, i64 48
+ %c12 = getelementptr inbounds i8, ptr %c0, i64 48
+ %a14 = getelementptr inbounds i8, ptr %a0, i64 56
+ %b14 = getelementptr inbounds i8, ptr %b0, i64 56
+ %c14 = getelementptr inbounds i8, ptr %c0, i64 56
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %va4 = load <2 x float>, ptr %a4, align 4
+ %vb4 = load <2 x float>, ptr %b4, align 4
+ %va6 = load <2 x float>, ptr %a6, align 4
+ %vb6 = load <2 x float>, ptr %b6, align 4
+ %va8 = load <2 x float>, ptr %a8, align 4
+ %vb8 = load <2 x float>, ptr %b8, align 4
+ %va10 = load <2 x float>, ptr %a10, align 4
+ %vb10 = load <2 x float>, ptr %b10, align 4
+ %va12 = load <2 x float>, ptr %a12, align 4
+ %vb12 = load <2 x float>, ptr %b12, align 4
+ %va14 = load <2 x float>, ptr %a14, align 4
+ %vb14 = load <2 x float>, ptr %b14, align 4
+ %vc0 = fadd <2 x float> %va0, %vb0
+ %vc2 = fadd <2 x float> %va2, %vb2
+ %vc4 = fadd <2 x float> %va4, %vb4
+ %vc6 = fadd <2 x float> %va6, %vb6
+ %vc8 = fadd <2 x float> %va8, %vb8
+ %vc10 = fadd <2 x float> %va10, %vb10
+ %vc12 = fadd <2 x float> %va12, %vb12
+ %vc14 = fadd <2 x float> %va14, %vb14
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ store <2 x float> %vc4, ptr %c4, align 4
+ store <2 x float> %vc6, ptr %c6, align 4
+ store <2 x float> %vc8, ptr %c8, align 4
+ store <2 x float> %vc10, ptr %c10, align 4
+ store <2 x float> %vc12, ptr %c12, align 4
+ store <2 x float> %vc14, ptr %c14, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/widen_fdiv.ll b/llvm/test/CodeGen/X86/widen_fdiv.ll
new file mode 100644
index 000000000000..f2ffa4bde22c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/widen_fdiv.ll
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+
+define void @widen_fdiv_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fdiv_v2f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: divps %xmm2, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: divps %xmm2, %xmm1
+; SSE-NEXT: movlps %xmm0, (%rdx)
+; SSE-NEXT: movlps %xmm1, 8(%rdx)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: widen_fdiv_v2f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vdivps %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vdivps %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vmovups %xmm0, (%rdx)
+; AVX-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %vc0 = fdiv <2 x float> %va0, %vb0
+ %vc2 = fdiv <2 x float> %va2, %vb2
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ ret void
+}
+
+define void @widen_fdiv_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fdiv_v2f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: divps %xmm4, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: divps %xmm4, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: divps %xmm4, %xmm2
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: divps %xmm4, %xmm3
+; SSE-NEXT: movlps %xmm0, (%rdx)
+; SSE-NEXT: movlps %xmm1, 8(%rdx)
+; SSE-NEXT: movlps %xmm2, 16(%rdx)
+; SSE-NEXT: movlps %xmm3, 24(%rdx)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fdiv_v2f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fdiv_v2f32_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512F-NEXT: vmovups %ymm0, (%rdx)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fdiv_v2f32_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512VL-NEXT: vmovups %ymm0, (%rdx)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %a4 = getelementptr inbounds i8, ptr %a0, i64 16
+ %b4 = getelementptr inbounds i8, ptr %b0, i64 16
+ %c4 = getelementptr inbounds i8, ptr %c0, i64 16
+ %a6 = getelementptr inbounds i8, ptr %a0, i64 24
+ %b6 = getelementptr inbounds i8, ptr %b0, i64 24
+ %c6 = getelementptr inbounds i8, ptr %c0, i64 24
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %va4 = load <2 x float>, ptr %a4, align 4
+ %vb4 = load <2 x float>, ptr %b4, align 4
+ %va6 = load <2 x float>, ptr %a6, align 4
+ %vb6 = load <2 x float>, ptr %b6, align 4
+ %vc0 = fdiv <2 x float> %va0, %vb0
+ %vc2 = fdiv <2 x float> %va2, %vb2
+ %vc4 = fdiv <2 x float> %va4, %vb4
+ %vc6 = fdiv <2 x float> %va6, %vb6
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ store <2 x float> %vc4, ptr %c4, align 4
+ store <2 x float> %vc6, ptr %c6, align 4
+ ret void
+}
+
+define void @widen_fdiv_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fdiv_v2f32_v16f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: divps %xmm4, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: divps %xmm4, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: divps %xmm4, %xmm2
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: divps %xmm4, %xmm3
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm5 = mem[0],zero
+; SSE-NEXT: divps %xmm5, %xmm4
+; SSE-NEXT: movsd {{.*#+}} xmm5 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
+; SSE-NEXT: divps %xmm6, %xmm5
+; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm7 = mem[0],zero
+; SSE-NEXT: divps %xmm7, %xmm6
+; SSE-NEXT: movsd {{.*#+}} xmm7 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm8 = mem[0],zero
+; SSE-NEXT: divps %xmm8, %xmm7
+; SSE-NEXT: movlps %xmm0, (%rdx)
+; SSE-NEXT: movlps %xmm1, 8(%rdx)
+; SSE-NEXT: movlps %xmm2, 16(%rdx)
+; SSE-NEXT: movlps %xmm3, 24(%rdx)
+; SSE-NEXT: movlps %xmm4, 32(%rdx)
+; SSE-NEXT: movlps %xmm5, 40(%rdx)
+; SSE-NEXT: movlps %xmm6, 48(%rdx)
+; SSE-NEXT: movlps %xmm7, 56(%rdx)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fdiv_v2f32_v16f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm5, %xmm4, %xmm4
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm6, %xmm5, %xmm5
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm7, %xmm6, %xmm6
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX1OR2-NEXT: vdivps %xmm8, %xmm7, %xmm7
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1OR2-NEXT: vmovups %ymm0, 32(%rdx)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fdiv_v2f32_v16f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm5, %xmm4, %xmm4
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm6, %xmm5, %xmm5
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm7, %xmm6, %xmm6
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX512F-NEXT: vdivps %xmm8, %xmm7, %xmm7
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
+; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT: vpermt2pd %zmm6, %zmm5, %zmm4
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpermt2pd %zmm2, %zmm5, %zmm0
+; AVX512F-NEXT: vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT: vmovupd %zmm0, (%rdx)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fdiv_v2f32_v16f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm5, %xmm4, %xmm4
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm6, %xmm5, %xmm5
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm7, %xmm6, %xmm6
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX512VL-NEXT: vdivps %xmm8, %xmm7, %xmm7
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
+; AVX512VL-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512VL-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512VL-NEXT: vpermi2pd %zmm6, %zmm4, %zmm5
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6]
+; AVX512VL-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1
+; AVX512VL-NEXT: vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512VL-NEXT: vmovupd %zmm0, (%rdx)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %a4 = getelementptr inbounds i8, ptr %a0, i64 16
+ %b4 = getelementptr inbounds i8, ptr %b0, i64 16
+ %c4 = getelementptr inbounds i8, ptr %c0, i64 16
+ %a6 = getelementptr inbounds i8, ptr %a0, i64 24
+ %b6 = getelementptr inbounds i8, ptr %b0, i64 24
+ %c6 = getelementptr inbounds i8, ptr %c0, i64 24
+ %a8 = getelementptr inbounds i8, ptr %a0, i64 32
+ %b8 = getelementptr inbounds i8, ptr %b0, i64 32
+ %c8 = getelementptr inbounds i8, ptr %c0, i64 32
+ %a10 = getelementptr inbounds i8, ptr %a0, i64 40
+ %b10 = getelementptr inbounds i8, ptr %b0, i64 40
+ %c10 = getelementptr inbounds i8, ptr %c0, i64 40
+ %a12 = getelementptr inbounds i8, ptr %a0, i64 48
+ %b12 = getelementptr inbounds i8, ptr %b0, i64 48
+ %c12 = getelementptr inbounds i8, ptr %c0, i64 48
+ %a14 = getelementptr inbounds i8, ptr %a0, i64 56
+ %b14 = getelementptr inbounds i8, ptr %b0, i64 56
+ %c14 = getelementptr inbounds i8, ptr %c0, i64 56
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %va4 = load <2 x float>, ptr %a4, align 4
+ %vb4 = load <2 x float>, ptr %b4, align 4
+ %va6 = load <2 x float>, ptr %a6, align 4
+ %vb6 = load <2 x float>, ptr %b6, align 4
+ %va8 = load <2 x float>, ptr %a8, align 4
+ %vb8 = load <2 x float>, ptr %b8, align 4
+ %va10 = load <2 x float>, ptr %a10, align 4
+ %vb10 = load <2 x float>, ptr %b10, align 4
+ %va12 = load <2 x float>, ptr %a12, align 4
+ %vb12 = load <2 x float>, ptr %b12, align 4
+ %va14 = load <2 x float>, ptr %a14, align 4
+ %vb14 = load <2 x float>, ptr %b14, align 4
+ %vc0 = fdiv <2 x float> %va0, %vb0
+ %vc2 = fdiv <2 x float> %va2, %vb2
+ %vc4 = fdiv <2 x float> %va4, %vb4
+ %vc6 = fdiv <2 x float> %va6, %vb6
+ %vc8 = fdiv <2 x float> %va8, %vb8
+ %vc10 = fdiv <2 x float> %va10, %vb10
+ %vc12 = fdiv <2 x float> %va12, %vb12
+ %vc14 = fdiv <2 x float> %va14, %vb14
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ store <2 x float> %vc4, ptr %c4, align 4
+ store <2 x float> %vc6, ptr %c6, align 4
+ store <2 x float> %vc8, ptr %c8, align 4
+ store <2 x float> %vc10, ptr %c10, align 4
+ store <2 x float> %vc12, ptr %c12, align 4
+ store <2 x float> %vc14, ptr %c14, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
new file mode 100644
index 000000000000..ac208da9ee11
--- /dev/null
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+
+define void @widen_fmul_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fmul_v2f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: mulps %xmm0, %xmm2
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: movlps %xmm2, (%rdx)
+; SSE-NEXT: movlps %xmm0, 8(%rdx)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: widen_fmul_v2f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vmulps %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vmulps %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vmovups %xmm0, (%rdx)
+; AVX-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %vc0 = fmul <2 x float> %va0, %vb0
+ %vc2 = fmul <2 x float> %va2, %vb2
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ ret void
+}
+
+define void @widen_fmul_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fmul_v2f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: mulps %xmm0, %xmm4
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: mulps %xmm2, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: mulps %xmm3, %xmm2
+; SSE-NEXT: movlps %xmm4, (%rdx)
+; SSE-NEXT: movlps %xmm0, 8(%rdx)
+; SSE-NEXT: movlps %xmm1, 16(%rdx)
+; SSE-NEXT: movlps %xmm2, 24(%rdx)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fmul_v2f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fmul_v2f32_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512F-NEXT: vmovups %ymm0, (%rdx)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fmul_v2f32_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512VL-NEXT: vmovups %ymm0, (%rdx)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %a4 = getelementptr inbounds i8, ptr %a0, i64 16
+ %b4 = getelementptr inbounds i8, ptr %b0, i64 16
+ %c4 = getelementptr inbounds i8, ptr %c0, i64 16
+ %a6 = getelementptr inbounds i8, ptr %a0, i64 24
+ %b6 = getelementptr inbounds i8, ptr %b0, i64 24
+ %c6 = getelementptr inbounds i8, ptr %c0, i64 24
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %va4 = load <2 x float>, ptr %a4, align 4
+ %vb4 = load <2 x float>, ptr %b4, align 4
+ %va6 = load <2 x float>, ptr %a6, align 4
+ %vb6 = load <2 x float>, ptr %b6, align 4
+ %vc0 = fmul <2 x float> %va0, %vb0
+ %vc2 = fmul <2 x float> %va2, %vb2
+ %vc4 = fmul <2 x float> %va4, %vb4
+ %vc6 = fmul <2 x float> %va6, %vb6
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ store <2 x float> %vc4, ptr %c4, align 4
+ store <2 x float> %vc6, ptr %c6, align 4
+ ret void
+}
+
+define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fmul_v2f32_v16f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: mulps %xmm0, %xmm4
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: mulps %xmm2, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: mulps %xmm3, %xmm2
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm5 = mem[0],zero
+; SSE-NEXT: mulps %xmm3, %xmm5
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
+; SSE-NEXT: mulps %xmm3, %xmm6
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm7 = mem[0],zero
+; SSE-NEXT: mulps %xmm3, %xmm7
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm8 = mem[0],zero
+; SSE-NEXT: mulps %xmm3, %xmm8
+; SSE-NEXT: movlps %xmm4, (%rdx)
+; SSE-NEXT: movlps %xmm0, 8(%rdx)
+; SSE-NEXT: movlps %xmm1, 16(%rdx)
+; SSE-NEXT: movlps %xmm2, 24(%rdx)
+; SSE-NEXT: movlps %xmm5, 32(%rdx)
+; SSE-NEXT: movlps %xmm6, 40(%rdx)
+; SSE-NEXT: movlps %xmm7, 48(%rdx)
+; SSE-NEXT: movlps %xmm8, 56(%rdx)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fmul_v2f32_v16f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm5, %xmm4, %xmm4
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm6, %xmm5, %xmm5
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm7, %xmm6, %xmm6
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX1OR2-NEXT: vmulps %xmm7, %xmm8, %xmm7
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1OR2-NEXT: vmovups %ymm0, 32(%rdx)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fmul_v2f32_v16f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm5, %xmm4, %xmm4
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm6, %xmm5, %xmm5
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm7, %xmm6, %xmm6
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX512F-NEXT: vmulps %xmm7, %xmm8, %xmm7
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
+; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT: vpermt2pd %zmm6, %zmm5, %zmm4
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpermt2pd %zmm2, %zmm5, %zmm0
+; AVX512F-NEXT: vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT: vmovupd %zmm0, (%rdx)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fmul_v2f32_v16f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm5, %xmm4, %xmm4
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm6, %xmm5, %xmm5
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm7, %xmm6, %xmm6
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX512VL-NEXT: vmulps %xmm7, %xmm8, %xmm7
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
+; AVX512VL-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512VL-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512VL-NEXT: vpermi2pd %zmm6, %zmm4, %zmm5
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6]
+; AVX512VL-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1
+; AVX512VL-NEXT: vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512VL-NEXT: vmovupd %zmm0, (%rdx)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %a4 = getelementptr inbounds i8, ptr %a0, i64 16
+ %b4 = getelementptr inbounds i8, ptr %b0, i64 16
+ %c4 = getelementptr inbounds i8, ptr %c0, i64 16
+ %a6 = getelementptr inbounds i8, ptr %a0, i64 24
+ %b6 = getelementptr inbounds i8, ptr %b0, i64 24
+ %c6 = getelementptr inbounds i8, ptr %c0, i64 24
+ %a8 = getelementptr inbounds i8, ptr %a0, i64 32
+ %b8 = getelementptr inbounds i8, ptr %b0, i64 32
+ %c8 = getelementptr inbounds i8, ptr %c0, i64 32
+ %a10 = getelementptr inbounds i8, ptr %a0, i64 40
+ %b10 = getelementptr inbounds i8, ptr %b0, i64 40
+ %c10 = getelementptr inbounds i8, ptr %c0, i64 40
+ %a12 = getelementptr inbounds i8, ptr %a0, i64 48
+ %b12 = getelementptr inbounds i8, ptr %b0, i64 48
+ %c12 = getelementptr inbounds i8, ptr %c0, i64 48
+ %a14 = getelementptr inbounds i8, ptr %a0, i64 56
+ %b14 = getelementptr inbounds i8, ptr %b0, i64 56
+ %c14 = getelementptr inbounds i8, ptr %c0, i64 56
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %va4 = load <2 x float>, ptr %a4, align 4
+ %vb4 = load <2 x float>, ptr %b4, align 4
+ %va6 = load <2 x float>, ptr %a6, align 4
+ %vb6 = load <2 x float>, ptr %b6, align 4
+ %va8 = load <2 x float>, ptr %a8, align 4
+ %vb8 = load <2 x float>, ptr %b8, align 4
+ %va10 = load <2 x float>, ptr %a10, align 4
+ %vb10 = load <2 x float>, ptr %b10, align 4
+ %va12 = load <2 x float>, ptr %a12, align 4
+ %vb12 = load <2 x float>, ptr %b12, align 4
+ %va14 = load <2 x float>, ptr %a14, align 4
+ %vb14 = load <2 x float>, ptr %b14, align 4
+ %vc0 = fmul <2 x float> %va0, %vb0
+ %vc2 = fmul <2 x float> %va2, %vb2
+ %vc4 = fmul <2 x float> %va4, %vb4
+ %vc6 = fmul <2 x float> %va6, %vb6
+ %vc8 = fmul <2 x float> %va8, %vb8
+ %vc10 = fmul <2 x float> %va10, %vb10
+ %vc12 = fmul <2 x float> %va12, %vb12
+ %vc14 = fmul <2 x float> %va14, %vb14
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ store <2 x float> %vc4, ptr %c4, align 4
+ store <2 x float> %vc6, ptr %c6, align 4
+ store <2 x float> %vc8, ptr %c8, align 4
+ store <2 x float> %vc10, ptr %c10, align 4
+ store <2 x float> %vc12, ptr %c12, align 4
+ store <2 x float> %vc14, ptr %c14, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
new file mode 100644
index 000000000000..90cf455ba61f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+
+define void @widen_fsub_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fsub_v2f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: subps %xmm2, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: subps %xmm2, %xmm1
+; SSE-NEXT: movlps %xmm0, (%rdx)
+; SSE-NEXT: movlps %xmm1, 8(%rdx)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: widen_fsub_v2f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vsubps %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX-NEXT: vsubps %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vmovups %xmm0, (%rdx)
+; AVX-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %vc0 = fsub <2 x float> %va0, %vb0
+ %vc2 = fsub <2 x float> %va2, %vb2
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ ret void
+}
+
+define void @widen_fsub_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fsub_v2f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: subps %xmm4, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: subps %xmm4, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: subps %xmm4, %xmm2
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: subps %xmm4, %xmm3
+; SSE-NEXT: movlps %xmm0, (%rdx)
+; SSE-NEXT: movlps %xmm1, 8(%rdx)
+; SSE-NEXT: movlps %xmm2, 16(%rdx)
+; SSE-NEXT: movlps %xmm3, 24(%rdx)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fsub_v2f32_v8f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fsub_v2f32_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512F-NEXT: vmovups %ymm0, (%rdx)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fsub_v2f32_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512VL-NEXT: vmovups %ymm0, (%rdx)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %a4 = getelementptr inbounds i8, ptr %a0, i64 16
+ %b4 = getelementptr inbounds i8, ptr %b0, i64 16
+ %c4 = getelementptr inbounds i8, ptr %c0, i64 16
+ %a6 = getelementptr inbounds i8, ptr %a0, i64 24
+ %b6 = getelementptr inbounds i8, ptr %b0, i64 24
+ %c6 = getelementptr inbounds i8, ptr %c0, i64 24
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %va4 = load <2 x float>, ptr %a4, align 4
+ %vb4 = load <2 x float>, ptr %b4, align 4
+ %va6 = load <2 x float>, ptr %a6, align 4
+ %vb6 = load <2 x float>, ptr %b6, align 4
+ %vc0 = fsub <2 x float> %va0, %vb0
+ %vc2 = fsub <2 x float> %va2, %vb2
+ %vc4 = fsub <2 x float> %va4, %vb4
+ %vc6 = fsub <2 x float> %va6, %vb6
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ store <2 x float> %vc4, ptr %c4, align 4
+ store <2 x float> %vc6, ptr %c6, align 4
+ ret void
+}
+
+define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
+; SSE-LABEL: widen_fsub_v2f32_v16f32:
+; SSE: # %bb.0:
+; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: subps %xmm4, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: subps %xmm4, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: subps %xmm4, %xmm2
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: subps %xmm4, %xmm3
+; SSE-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm5 = mem[0],zero
+; SSE-NEXT: subps %xmm5, %xmm4
+; SSE-NEXT: movsd {{.*#+}} xmm5 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
+; SSE-NEXT: subps %xmm6, %xmm5
+; SSE-NEXT: movsd {{.*#+}} xmm6 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm7 = mem[0],zero
+; SSE-NEXT: subps %xmm7, %xmm6
+; SSE-NEXT: movsd {{.*#+}} xmm7 = mem[0],zero
+; SSE-NEXT: movsd {{.*#+}} xmm8 = mem[0],zero
+; SSE-NEXT: subps %xmm8, %xmm7
+; SSE-NEXT: movlps %xmm0, (%rdx)
+; SSE-NEXT: movlps %xmm1, 8(%rdx)
+; SSE-NEXT: movlps %xmm2, 16(%rdx)
+; SSE-NEXT: movlps %xmm3, 24(%rdx)
+; SSE-NEXT: movlps %xmm4, 32(%rdx)
+; SSE-NEXT: movlps %xmm5, 40(%rdx)
+; SSE-NEXT: movlps %xmm6, 48(%rdx)
+; SSE-NEXT: movlps %xmm7, 56(%rdx)
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fsub_v2f32_v16f32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm5, %xmm4, %xmm4
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm6, %xmm5, %xmm5
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm7, %xmm6, %xmm6
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX1OR2-NEXT: vsubps %xmm8, %xmm7, %xmm7
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT: vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1OR2-NEXT: vmovups %ymm0, 32(%rdx)
+; AVX1OR2-NEXT: vzeroupper
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fsub_v2f32_v16f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm5, %xmm4, %xmm4
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm6, %xmm5, %xmm5
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm7, %xmm6, %xmm6
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512F-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX512F-NEXT: vsubps %xmm8, %xmm7, %xmm7
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
+; AVX512F-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512F-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT: vpermt2pd %zmm6, %zmm5, %zmm4
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
+; AVX512F-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpermt2pd %zmm2, %zmm5, %zmm0
+; AVX512F-NEXT: vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512F-NEXT: vmovupd %zmm0, (%rdx)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fsub_v2f32_v16f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm4, %xmm3, %xmm3
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm5, %xmm4, %xmm4
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm6, %xmm5, %xmm5
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm7, %xmm6, %xmm6
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX512VL-NEXT: vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX512VL-NEXT: vsubps %xmm8, %xmm7, %xmm7
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
+; AVX512VL-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
+; AVX512VL-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512VL-NEXT: vpermi2pd %zmm6, %zmm4, %zmm5
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
+; AVX512VL-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6]
+; AVX512VL-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1
+; AVX512VL-NEXT: vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
+; AVX512VL-NEXT: vmovupd %zmm0, (%rdx)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a2 = getelementptr inbounds i8, ptr %a0, i64 8
+ %b2 = getelementptr inbounds i8, ptr %b0, i64 8
+ %c2 = getelementptr inbounds i8, ptr %c0, i64 8
+ %a4 = getelementptr inbounds i8, ptr %a0, i64 16
+ %b4 = getelementptr inbounds i8, ptr %b0, i64 16
+ %c4 = getelementptr inbounds i8, ptr %c0, i64 16
+ %a6 = getelementptr inbounds i8, ptr %a0, i64 24
+ %b6 = getelementptr inbounds i8, ptr %b0, i64 24
+ %c6 = getelementptr inbounds i8, ptr %c0, i64 24
+ %a8 = getelementptr inbounds i8, ptr %a0, i64 32
+ %b8 = getelementptr inbounds i8, ptr %b0, i64 32
+ %c8 = getelementptr inbounds i8, ptr %c0, i64 32
+ %a10 = getelementptr inbounds i8, ptr %a0, i64 40
+ %b10 = getelementptr inbounds i8, ptr %b0, i64 40
+ %c10 = getelementptr inbounds i8, ptr %c0, i64 40
+ %a12 = getelementptr inbounds i8, ptr %a0, i64 48
+ %b12 = getelementptr inbounds i8, ptr %b0, i64 48
+ %c12 = getelementptr inbounds i8, ptr %c0, i64 48
+ %a14 = getelementptr inbounds i8, ptr %a0, i64 56
+ %b14 = getelementptr inbounds i8, ptr %b0, i64 56
+ %c14 = getelementptr inbounds i8, ptr %c0, i64 56
+ %va0 = load <2 x float>, ptr %a0, align 4
+ %vb0 = load <2 x float>, ptr %b0, align 4
+ %va2 = load <2 x float>, ptr %a2, align 4
+ %vb2 = load <2 x float>, ptr %b2, align 4
+ %va4 = load <2 x float>, ptr %a4, align 4
+ %vb4 = load <2 x float>, ptr %b4, align 4
+ %va6 = load <2 x float>, ptr %a6, align 4
+ %vb6 = load <2 x float>, ptr %b6, align 4
+ %va8 = load <2 x float>, ptr %a8, align 4
+ %vb8 = load <2 x float>, ptr %b8, align 4
+ %va10 = load <2 x float>, ptr %a10, align 4
+ %vb10 = load <2 x float>, ptr %b10, align 4
+ %va12 = load <2 x float>, ptr %a12, align 4
+ %vb12 = load <2 x float>, ptr %b12, align 4
+ %va14 = load <2 x float>, ptr %a14, align 4
+ %vb14 = load <2 x float>, ptr %b14, align 4
+ %vc0 = fsub <2 x float> %va0, %vb0
+ %vc2 = fsub <2 x float> %va2, %vb2
+ %vc4 = fsub <2 x float> %va4, %vb4
+ %vc6 = fsub <2 x float> %va6, %vb6
+ %vc8 = fsub <2 x float> %va8, %vb8
+ %vc10 = fsub <2 x float> %va10, %vb10
+ %vc12 = fsub <2 x float> %va12, %vb12
+ %vc14 = fsub <2 x float> %va14, %vb14
+ store <2 x float> %vc0, ptr %c0, align 4
+ store <2 x float> %vc2, ptr %c2, align 4
+ store <2 x float> %vc4, ptr %c4, align 4
+ store <2 x float> %vc6, ptr %c6, align 4
+ store <2 x float> %vc8, ptr %c8, align 4
+ store <2 x float> %vc10, ptr %c10, align 4
+ store <2 x float> %vc12, ptr %c12, align 4
+ store <2 x float> %vc14, ptr %c14, align 4
+ ret void
+}
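
For reference, the eight <2 x float> subtractions in @widen_fsub_v2f32_v16f32 read and write contiguous memory (offsets 0 through 56 bytes of %a0, %b0 and %c0), so the whole body is semantically equivalent to a single <16 x float> operation. A minimal hand-widened sketch of that equivalent follows; the "_widened" function name is hypothetical and this function is not part of the committed tests, it only illustrates the combine the tests above show is currently missed:

; Illustrative only - not part of the commit. The adjacent <2 x float>
; loads, fsubs and stores of @widen_fsub_v2f32_v16f32 merged into one
; <16 x float> operation, keeping the original element alignment of 4.
define void @widen_fsub_v2f32_v16f32_widened(ptr %a0, ptr %b0, ptr %c0) {
  %va = load <16 x float>, ptr %a0, align 4
  %vb = load <16 x float>, ptr %b0, align 4
  %vc = fsub <16 x float> %va, %vb
  store <16 x float> %vc, ptr %c0, align 4
  ret void
}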