[llvm] [X86] Add bf16 support to isFMAFasterThanFMulAndFAdd for basic FMA optimizations (PR #172006)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 30 04:58:35 PST 2025


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/172006

From ab4683df4cf9812dcade476d0b4aee0a4e87f74b Mon Sep 17 00:00:00 2001
From: Antoni Zwolski <antoni.zwolski at intel.com>
Date: Thu, 11 Dec 2025 21:49:27 +0100
Subject: [PATCH 1/5] [X86] Add support for bf16 in isFMAFasterThanFMulAndFAdd

---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 133406bd8e0d7..894ad3248ebd4 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -35542,6 +35542,8 @@ bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
   switch (VT.getSimpleVT().SimpleTy) {
   case MVT::f16:
     return Subtarget.hasFP16();
+  case MVT::bf16:
+    return Subtarget.hasAVX10_2();
   case MVT::f32:
   case MVT::f64:
     return true;
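
[Context note, not part of the patch] isFMAFasterThanFMulAndFAdd is the TargetLowering hook the SelectionDAG combiner consults before contracting an fmul feeding an fadd into a single FMA node; the tests added below opt into contraction via the 'contract' fast-math flag. A minimal, self-contained sketch of the gate this hook feeds (the struct and function names are illustrative stand-ins, not LLVM APIs; in-tree the query is roughly TLI.isFMAFasterThanFMulAndFAdd(MF, VT) plus an FMA legality check):

  // Illustrative sketch only (not DAGCombiner code): the gate applied
  // before rewriting (fadd (fmul a, b), c) into a single fma node.
  #include <cstdio>

  struct TargetInfo {
    bool FMALegalOrCustomForVT;     // rough stand-in for the legality check
    bool FMAFasterThanFMulAndFAdd;  // what the hook patched above reports
  };

  bool shouldFormFMA(const TargetInfo &TLI, bool HasContractFlag) {
    // Contraction must be allowed (here via the IR-level 'contract'
    // fast-math flag, as in the tests added below), the fused op must be
    // lowerable, and the target must report it as profitable.
    return HasContractFlag && TLI.FMALegalOrCustomForVT &&
           TLI.FMAFasterThanFMulAndFAdd;
  }

  int main() {
    TargetInfo BF16OnAVX10_2{true, true};  // per the bf16 case added above
    TargetInfo BF16Baseline{false, false}; // no native bf16 FMA
    std::printf("AVX10.2 bf16, contract -> fuse: %d\n",
                (int)shouldFormFMA(BF16OnAVX10_2, /*HasContractFlag=*/true));
    std::printf("baseline bf16, contract -> fuse: %d\n",
                (int)shouldFormFMA(BF16Baseline, /*HasContractFlag=*/true));
    return 0;
  }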

From ca1ccbff4f0c649ed9ace2c3d9106be17b34c157 Mon Sep 17 00:00:00 2001
From: Antoni Zwolski <antoni.zwolski at intel.com>
Date: Thu, 11 Dec 2025 21:50:23 +0100
Subject: [PATCH 2/5] [X86] Add bf16 fma tests

---
 llvm/test/CodeGen/X86/avx10_2bf16-fma.ll | 241 +++++++++++++++++++++++
 1 file changed, 241 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/avx10_2bf16-fma.ll

diff --git a/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll b/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll
new file mode 100644
index 0000000000000..22281fded1295
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll
@@ -0,0 +1,241 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 | FileCheck %s --check-prefixes=AVX10_2
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx512bf16,+avx512vl | FileCheck %s --check-prefixes=AVX512BF16
+
+define bfloat @fuse_bf16(bfloat %a, bfloat %b, bfloat %c) nounwind {
+; AVX10_2-LABEL: fuse_bf16:
+; AVX10_2:       # %bb.0: # %entry
+; AVX10_2-NEXT:    vmovw %xmm1, %eax
+; AVX10_2-NEXT:    vmovw %xmm0, %ecx
+; AVX10_2-NEXT:    vmovw %xmm2, %edx
+; AVX10_2-NEXT:    shll $16, %edx
+; AVX10_2-NEXT:    vmovd %edx, %xmm0
+; AVX10_2-NEXT:    shll $16, %ecx
+; AVX10_2-NEXT:    vmovd %ecx, %xmm1
+; AVX10_2-NEXT:    shll $16, %eax
+; AVX10_2-NEXT:    vmovd %eax, %xmm2
+; AVX10_2-NEXT:    vfmadd213ss {{.*#+}} xmm2 = (xmm1 * xmm2) + xmm0
+; AVX10_2-NEXT:    vcvtneps2bf16 %xmm2, %xmm0
+; AVX10_2-NEXT:    retq
+;
+; AVX512BF16-LABEL: fuse_bf16:
+; AVX512BF16:       # %bb.0: # %entry
+; AVX512BF16-NEXT:    vpextrw $0, %xmm2, %eax
+; AVX512BF16-NEXT:    vpextrw $0, %xmm0, %ecx
+; AVX512BF16-NEXT:    vpextrw $0, %xmm1, %edx
+; AVX512BF16-NEXT:    shll $16, %edx
+; AVX512BF16-NEXT:    vmovd %edx, %xmm0
+; AVX512BF16-NEXT:    shll $16, %ecx
+; AVX512BF16-NEXT:    vmovd %ecx, %xmm1
+; AVX512BF16-NEXT:    vmulss %xmm0, %xmm1, %xmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %xmm0, %xmm0
+; AVX512BF16-NEXT:    vmovd %xmm0, %ecx
+; AVX512BF16-NEXT:    shll $16, %ecx
+; AVX512BF16-NEXT:    vmovd %ecx, %xmm0
+; AVX512BF16-NEXT:    shll $16, %eax
+; AVX512BF16-NEXT:    vmovd %eax, %xmm1
+; AVX512BF16-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %xmm0, %xmm0
+; AVX512BF16-NEXT:    retq
+entry:
+  %m = fmul contract bfloat %a, %b
+  %r = fadd  contract bfloat %m, %c
+  ret bfloat %r
+}
+
+define <8 x bfloat> @fuse_v8bf16(<8 x bfloat> %x, <8 x bfloat> %y, <8 x bfloat> %z) nounwind {
+; AVX10_2-LABEL: fuse_v8bf16:
+; AVX10_2:       # %bb.0: # %entry
+; AVX10_2-NEXT:    vfmadd213bf16 %xmm2, %xmm1, %xmm0
+; AVX10_2-NEXT:    retq
+;
+; AVX512BF16-LABEL: fuse_v8bf16:
+; AVX512BF16:       # %bb.0: # %entry
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BF16-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512BF16-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVX512BF16-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %ymm0, %xmm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512BF16-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX512BF16-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVX512BF16-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %ymm0, %xmm0
+; AVX512BF16-NEXT:    vzeroupper
+; AVX512BF16-NEXT:    retq
+entry:
+  %m = fmul contract <8 x bfloat> %x, %y
+  %r = fadd  contract <8 x bfloat> %m, %z
+  ret <8 x bfloat> %r
+}
+
+define <16 x bfloat> @fuse_v16bf16(<16 x bfloat> %x, <16 x bfloat> %y, <16 x bfloat> %z) nounwind {
+; AVX10_2-LABEL: fuse_v16bf16:
+; AVX10_2:       # %bb.0: # %entry
+; AVX10_2-NEXT:    vfmadd213bf16 %ymm2, %ymm1, %ymm0
+; AVX10_2-NEXT:    retq
+;
+; AVX512BF16-LABEL: fuse_v16bf16:
+; AVX512BF16:       # %bb.0: # %entry
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vmulps %zmm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vaddps %zmm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
+; AVX512BF16-NEXT:    retq
+entry:
+  %m = fmul contract <16 x bfloat> %x, %y
+  %r = fadd  contract <16 x bfloat> %m, %z
+  ret <16 x bfloat> %r
+}
+
+define <32 x bfloat> @fuse_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y, <32 x bfloat> %z) nounwind {
+; AVX10_2-LABEL: fuse_v32bf16:
+; AVX10_2:       # %bb.0: # %entry
+; AVX10_2-NEXT:    vfmadd213bf16 %zmm2, %zmm1, %zmm0
+; AVX10_2-NEXT:    retq
+;
+; AVX512BF16-LABEL: fuse_v32bf16:
+; AVX512BF16:       # %bb.0: # %entry
+; AVX512BF16-NEXT:    vextracti64x4 $1, %zmm1, %ymm3
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm3, %zmm3
+; AVX512BF16-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm4, %zmm4
+; AVX512BF16-NEXT:    vmulps %zmm3, %zmm4, %zmm3
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm3, %ymm3
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vmulps %zmm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vaddps %zmm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vextracti64x4 $1, %zmm2, %ymm2
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm2, %zmm2
+; AVX512BF16-NEXT:    vaddps %zmm2, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm1, %ymm1
+; AVX512BF16-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    retq
+entry:
+  %m = fmul contract <32 x bfloat> %x, %y
+  %r = fadd  contract <32 x bfloat> %m, %z
+  ret <32 x bfloat> %r
+}
+
+define <5 x bfloat> @fuse_v5bf16(<5 x bfloat> %x, <5 x bfloat> %y, <5 x bfloat> %z) nounwind {
+; AVX10_2-LABEL: fuse_v5bf16:
+; AVX10_2:       # %bb.0: # %entry
+; AVX10_2-NEXT:    vfmadd213bf16 %xmm2, %xmm1, %xmm0
+; AVX10_2-NEXT:    retq
+;
+; AVX512BF16-LABEL: fuse_v5bf16:
+; AVX512BF16:       # %bb.0: # %entry
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512BF16-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512BF16-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVX512BF16-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %ymm0, %xmm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512BF16-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX512BF16-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVX512BF16-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %ymm0, %xmm0
+; AVX512BF16-NEXT:    vzeroupper
+; AVX512BF16-NEXT:    retq
+entry:
+  %m = fmul contract <5 x bfloat> %x, %y
+  %r = fadd  contract <5 x bfloat> %m, %z
+  ret <5 x bfloat> %r
+}
+
+define <9 x bfloat> @fnmadd_v9bf16(<9 x bfloat> %x, <9 x bfloat> %y, <9 x bfloat> %z) nounwind {
+; AVX10_2-LABEL: fnmadd_v9bf16:
+; AVX10_2:       # %bb.0: # %entry
+; AVX10_2-NEXT:    vfnmadd213bf16 %ymm2, %ymm1, %ymm0
+; AVX10_2-NEXT:    retq
+;
+; AVX512BF16-LABEL: fnmadd_v9bf16:
+; AVX512BF16:       # %bb.0: # %entry
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vmulps %zmm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vsubps %zmm0, %zmm1, %zmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
+; AVX512BF16-NEXT:    retq
+entry:
+  %m = fmul contract <9 x bfloat> %x, %y
+  %n = fneg <9 x bfloat> %m
+  %r = fadd contract <9 x bfloat> %n, %z
+  ret <9 x bfloat> %r
+}
+
+define <29 x bfloat> @fuse_v19bf16_load(<29 x bfloat> %x, <29 x bfloat> %y, ptr %p) nounwind {
+; AVX10_2-LABEL: fuse_v19bf16_load:
+; AVX10_2:       # %bb.0: # %entry
+; AVX10_2-NEXT:    vfmadd213bf16 (%rdi), %zmm1, %zmm0
+; AVX10_2-NEXT:    retq
+;
+; AVX512BF16-LABEL: fuse_v19bf16_load:
+; AVX512BF16:       # %bb.0: # %entry
+; AVX512BF16-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm2, %zmm2
+; AVX512BF16-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm3, %zmm3
+; AVX512BF16-NEXT:    vmulps %zmm2, %zmm3, %zmm2
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm2, %ymm2
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vmulps %zmm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vaddps %zmm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512BF16-NEXT:    vpslld $16, %zmm2, %zmm2
+; AVX512BF16-NEXT:    vaddps %zmm2, %zmm1, %zmm1
+; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm1, %ymm1
+; AVX512BF16-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BF16-NEXT:    retq
+entry:
+  %z = load <29 x bfloat>, ptr %p
+  %m = fmul contract <29 x bfloat> %x, %y
+  %r = fadd contract <29 x bfloat> %m, %z
+  ret <29 x bfloat> %r
+}

From 8bd3f6e4199ef5787d2d593584646011a3d4c597 Mon Sep 17 00:00:00 2001
From: Antoni Zwolski <antoni.zwolski at intel.com>
Date: Fri, 19 Dec 2025 15:28:14 +0100
Subject: [PATCH 3/5] [X86] Update isFMAFasterThanFMulAndFAdd check

---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 894ad3248ebd4..e8a66eb0995aa 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -35534,6 +35534,7 @@ bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
   if (!Subtarget.hasAnyFMA())
     return false;
 
+  bool IsVector = VT.isVector();
   VT = VT.getScalarType();
 
   if (!VT.isSimple())
@@ -35543,7 +35544,9 @@ bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
   case MVT::f16:
     return Subtarget.hasFP16();
   case MVT::bf16:
-    return Subtarget.hasAVX10_2();
+    return (!IsVector &&
+            (Subtarget.hasBF16() || Subtarget.hasAVXNECONVERT())) ||
+           Subtarget.hasAVX10_2();
   case MVT::f32:
   case MVT::f64:
     return true;
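
[Reading aid, not part of the patch] The refinement above distinguishes scalar from vector bf16. Scalar bf16 is legalized by promoting to f32, so subtargets that only provide bf16<->f32 conversions (AVX512BF16, AVX-NE-CONVERT) can still profit from reporting the fused form whenever an f32 FMA is available; patch 4 below shows the AVX512BF16 scalar checks collapsing vmulss+vaddss into a single vfmadd213ss. Vector bf16, by contrast, still round-trips through per-lane conversions on those subtargets, so only AVX10.2 with its native bf16 FMA instructions returns true there. A hedged restatement of the final predicate (the Subtarget feature names come from the diff; everything else is an illustrative sketch):

  // Sketch of the final bf16 gating logic; X86Caps and fmaFaster are
  // illustrative stand-ins for the real Subtarget and hook.
  struct X86Caps {
    bool HasFP16;         // AVX512FP16: native f16 FMA
    bool HasBF16;         // AVX512BF16: bf16<->f32 conversions only
    bool HasAVXNECONVERT; // AVX-NE-CONVERT: VEX-encoded conversions only
    bool HasAVX10_2;      // AVX10.2: native bf16 FMA (VFMADD*BF16)
  };

  enum class Ty { F16, BF16, F32, F64, Other };

  bool fmaFaster(const X86Caps &ST, Ty ScalarTy, bool IsVector) {
    switch (ScalarTy) {
    case Ty::F16:
      return ST.HasFP16;
    case Ty::BF16:
      // Scalar bf16 is promoted to f32, so conversion-only subtargets can
      // still form an f32 FMA; vector bf16 needs the AVX10.2 bf16 FMA.
      return (!IsVector && (ST.HasBF16 || ST.HasAVXNECONVERT)) ||
             ST.HasAVX10_2;
    case Ty::F32:
    case Ty::F64:
      return true;
    default:
      return false;
    }
  }

  int main() {
    X86Caps AVX512BF16{false, true, false, false};
    X86Caps AVX10_2{true, false, false, true};
    // Scalar bf16 fuses on AVX512BF16; vector bf16 only on AVX10.2.
    bool ScalarOnBF16 = fmaFaster(AVX512BF16, Ty::BF16, /*IsVector=*/false);
    bool VectorOnBF16 = fmaFaster(AVX512BF16, Ty::BF16, /*IsVector=*/true);
    bool VectorOn102 = fmaFaster(AVX10_2, Ty::BF16, /*IsVector=*/true);
    return (ScalarOnBF16 && !VectorOnBF16 && VectorOn102) ? 0 : 1;
  }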

From bc58d87f0f54a30835d4d25c11d6db70b588095e Mon Sep 17 00:00:00 2001
From: Antoni Zwolski <antoni.zwolski at intel.com>
Date: Fri, 19 Dec 2025 15:28:44 +0100
Subject: [PATCH 4/5] [X86] Update llc test checks

---
 llvm/test/CodeGen/X86/avx10_2bf16-fma.ll | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll b/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll
index 22281fded1295..8394df12af7b8 100644
--- a/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll
+++ b/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll
@@ -20,22 +20,17 @@ define bfloat @fuse_bf16(bfloat %a, bfloat %b, bfloat %c) nounwind {
 ;
 ; AVX512BF16-LABEL: fuse_bf16:
 ; AVX512BF16:       # %bb.0: # %entry
-; AVX512BF16-NEXT:    vpextrw $0, %xmm2, %eax
+; AVX512BF16-NEXT:    vpextrw $0, %xmm1, %eax
 ; AVX512BF16-NEXT:    vpextrw $0, %xmm0, %ecx
-; AVX512BF16-NEXT:    vpextrw $0, %xmm1, %edx
+; AVX512BF16-NEXT:    vpextrw $0, %xmm2, %edx
 ; AVX512BF16-NEXT:    shll $16, %edx
 ; AVX512BF16-NEXT:    vmovd %edx, %xmm0
 ; AVX512BF16-NEXT:    shll $16, %ecx
 ; AVX512BF16-NEXT:    vmovd %ecx, %xmm1
-; AVX512BF16-NEXT:    vmulss %xmm0, %xmm1, %xmm0
-; AVX512BF16-NEXT:    vcvtneps2bf16 %xmm0, %xmm0
-; AVX512BF16-NEXT:    vmovd %xmm0, %ecx
-; AVX512BF16-NEXT:    shll $16, %ecx
-; AVX512BF16-NEXT:    vmovd %ecx, %xmm0
 ; AVX512BF16-NEXT:    shll $16, %eax
-; AVX512BF16-NEXT:    vmovd %eax, %xmm1
-; AVX512BF16-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX512BF16-NEXT:    vcvtneps2bf16 %xmm0, %xmm0
+; AVX512BF16-NEXT:    vmovd %eax, %xmm2
+; AVX512BF16-NEXT:    vfmadd213ss {{.*#+}} xmm2 = (xmm1 * xmm2) + xmm0
+; AVX512BF16-NEXT:    vcvtneps2bf16 %xmm2, %xmm0
 ; AVX512BF16-NEXT:    retq
 entry:
   %m = fmul contract bfloat %a, %b

From 7e96992b7fbc487b475163bf5d1d6f3e56c193fc Mon Sep 17 00:00:00 2001
From: Antoni Zwolski <antoni.zwolski at intel.com>
Date: Fri, 19 Dec 2025 15:31:14 +0100
Subject: [PATCH 5/5] [X86] Add avxneconvert run line

---
 llvm/test/CodeGen/X86/avx10_2bf16-fma.ll | 570 +++++++++++++++++++++++
 1 file changed, 570 insertions(+)

diff --git a/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll b/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll
index 8394df12af7b8..d79f0cc79b5f4 100644
--- a/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll
+++ b/llvm/test/CodeGen/X86/avx10_2bf16-fma.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
 ; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 | FileCheck %s --check-prefixes=AVX10_2
 ; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avx512bf16,+avx512vl | FileCheck %s --check-prefixes=AVX512BF16
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mattr=+avxneconvert | FileCheck %s --check-prefixes=AVXNECONVERT
 
 define bfloat @fuse_bf16(bfloat %a, bfloat %b, bfloat %c) nounwind {
 ; AVX10_2-LABEL: fuse_bf16:
@@ -32,6 +33,26 @@ define bfloat @fuse_bf16(bfloat %a, bfloat %b, bfloat %c) nounwind {
 ; AVX512BF16-NEXT:    vfmadd213ss {{.*#+}} xmm2 = (xmm1 * xmm2) + xmm0
 ; AVX512BF16-NEXT:    vcvtneps2bf16 %xmm2, %xmm0
 ; AVX512BF16-NEXT:    retq
+;
+; AVXNECONVERT-LABEL: fuse_bf16:
+; AVXNECONVERT:       # %bb.0: # %entry
+; AVXNECONVERT-NEXT:    vpextrw $0, %xmm2, %eax
+; AVXNECONVERT-NEXT:    vpextrw $0, %xmm0, %ecx
+; AVXNECONVERT-NEXT:    vpextrw $0, %xmm1, %edx
+; AVXNECONVERT-NEXT:    shll $16, %edx
+; AVXNECONVERT-NEXT:    vmovd %edx, %xmm0
+; AVXNECONVERT-NEXT:    shll $16, %ecx
+; AVXNECONVERT-NEXT:    vmovd %ecx, %xmm1
+; AVXNECONVERT-NEXT:    vmulss %xmm0, %xmm1, %xmm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %xmm0, %xmm0
+; AVXNECONVERT-NEXT:    vmovd %xmm0, %ecx
+; AVXNECONVERT-NEXT:    shll $16, %ecx
+; AVXNECONVERT-NEXT:    vmovd %ecx, %xmm0
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm1
+; AVXNECONVERT-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %xmm0, %xmm0
+; AVXNECONVERT-NEXT:    retq
 entry:
   %m = fmul contract bfloat %a, %b
   %r = fadd  contract bfloat %m, %c
@@ -60,6 +81,139 @@ define <8 x bfloat> @fuse_v8bf16(<8 x bfloat> %x, <8 x bfloat> %y, <8 x bfloat>
 ; AVX512BF16-NEXT:    vcvtneps2bf16 %ymm0, %xmm0
 ; AVX512BF16-NEXT:    vzeroupper
 ; AVX512BF16-NEXT:    retq
+;
+; AVXNECONVERT-LABEL: fuse_v8bf16:
+; AVXNECONVERT:       # %bb.0: # %entry
+; AVXNECONVERT-NEXT:    vpextrw $5, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $6, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNECONVERT-NEXT:    vpextrw $7, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVXNECONVERT-NEXT:    vpextrw $1, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vmovd %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm5
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $2, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm5
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; AVXNECONVERT-NEXT:    vpextrw $3, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm1
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0]
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNECONVERT-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVXNECONVERT-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vmovd %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm5
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm5
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; AVXNECONVERT-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm0
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[0]
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm1
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVXNECONVERT-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
+; AVXNECONVERT-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vmovd %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNECONVERT-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm0
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpextrw $5, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm1
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $6, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVXNECONVERT-NEXT:    vpextrw $7, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
+; AVXNECONVERT-NEXT:    vpextrw $1, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vmovd %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $2, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNECONVERT-NEXT:    vpextrw $3, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm2
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0]
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVXNECONVERT-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vzeroupper
+; AVXNECONVERT-NEXT:    retq
 entry:
   %m = fmul contract <8 x bfloat> %x, %y
   %r = fadd  contract <8 x bfloat> %m, %z
@@ -87,6 +241,38 @@ define <16 x bfloat> @fuse_v16bf16(<16 x bfloat> %x, <16 x bfloat> %y, <16 x bfl
 ; AVX512BF16-NEXT:    vaddps %zmm1, %zmm0, %zmm0
 ; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
 ; AVX512BF16-NEXT:    retq
+;
+; AVXNECONVERT-LABEL: fuse_v16bf16:
+; AVXNECONVERT:       # %bb.0: # %entry
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm4, %ymm4
+; AVXNECONVERT-NEXT:    vmulps %ymm3, %ymm4, %ymm3
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm3, %xmm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vaddps %ymm2, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm1, %xmm1
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    retq
 entry:
   %m = fmul contract <16 x bfloat> %x, %y
   %r = fadd  contract <16 x bfloat> %m, %z
@@ -130,6 +316,66 @@ define <32 x bfloat> @fuse_v32bf16(<32 x bfloat> %x, <32 x bfloat> %y, <32 x bfl
 ; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm1, %ymm1
 ; AVX512BF16-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512BF16-NEXT:    retq
+;
+; AVXNECONVERT-LABEL: fuse_v32bf16:
+; AVXNECONVERT:       # %bb.0: # %entry
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm3, %xmm6
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm6, %ymm6
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm1, %xmm7
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm7, %ymm7
+; AVXNECONVERT-NEXT:    vmulps %ymm6, %ymm7, %ymm6
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm6, %xmm6
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vmulps %ymm3, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm1, %xmm1
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm7, %ymm7
+; AVXNECONVERT-NEXT:    vmulps %ymm3, %ymm7, %ymm3
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm3, %xmm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vmulps %ymm2, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vaddps %ymm2, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm4, %xmm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vaddps %ymm3, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm2, %xmm2
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vaddps %ymm2, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm1, %xmm1
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm5, %xmm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vaddps %ymm3, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm2, %xmm2
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    retq
 entry:
   %m = fmul contract <32 x bfloat> %x, %y
   %r = fadd  contract <32 x bfloat> %m, %z
@@ -158,6 +404,139 @@ define <5 x bfloat> @fuse_v5bf16(<5 x bfloat> %x, <5 x bfloat> %y, <5 x bfloat>
 ; AVX512BF16-NEXT:    vcvtneps2bf16 %ymm0, %xmm0
 ; AVX512BF16-NEXT:    vzeroupper
 ; AVX512BF16-NEXT:    retq
+;
+; AVXNECONVERT-LABEL: fuse_v5bf16:
+; AVXNECONVERT:       # %bb.0: # %entry
+; AVXNECONVERT-NEXT:    vpextrw $5, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $6, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNECONVERT-NEXT:    vpextrw $7, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVXNECONVERT-NEXT:    vpextrw $1, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vmovd %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm5
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $2, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm5
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; AVXNECONVERT-NEXT:    vpextrw $3, %xmm1, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm1
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0]
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNECONVERT-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVXNECONVERT-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vmovd %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm5
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm5
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; AVXNECONVERT-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm0
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[0]
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm1
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVXNECONVERT-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
+; AVXNECONVERT-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vmovd %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNECONVERT-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm0
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpextrw $5, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm1
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $6, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVXNECONVERT-NEXT:    vpextrw $7, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
+; AVXNECONVERT-NEXT:    vpextrw $1, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm3
+; AVXNECONVERT-NEXT:    vmovd %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNECONVERT-NEXT:    vpextrw $2, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm4
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNECONVERT-NEXT:    vpextrw $3, %xmm2, %eax
+; AVXNECONVERT-NEXT:    shll $16, %eax
+; AVXNECONVERT-NEXT:    vmovd %eax, %xmm2
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0]
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVXNECONVERT-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vzeroupper
+; AVXNECONVERT-NEXT:    retq
 entry:
   %m = fmul contract <5 x bfloat> %x, %y
   %r = fadd  contract <5 x bfloat> %m, %z
@@ -185,6 +564,38 @@ define <9 x bfloat> @fnmadd_v9bf16(<9 x bfloat> %x, <9 x bfloat> %y, <9 x bfloat
 ; AVX512BF16-NEXT:    vsubps %zmm0, %zmm1, %zmm0
 ; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm0, %ymm0
 ; AVX512BF16-NEXT:    retq
+;
+; AVXNECONVERT-LABEL: fnmadd_v9bf16:
+; AVXNECONVERT:       # %bb.0: # %entry
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm4, %ymm4
+; AVXNECONVERT-NEXT:    vmulps %ymm3, %ymm4, %ymm3
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm3, %xmm3
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vsubps %ymm0, %ymm1, %ymm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vsubps %ymm1, %ymm2, %ymm1
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm1, %xmm1
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    retq
 entry:
   %m = fmul contract <9 x bfloat> %x, %y
   %n = fneg <9 x bfloat> %m
@@ -228,6 +639,165 @@ define <29 x bfloat> @fuse_v19bf16_load(<29 x bfloat> %x, <29 x bfloat> %y, ptr
 ; AVX512BF16-NEXT:    vcvtneps2bf16 %zmm1, %ymm1
 ; AVX512BF16-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512BF16-NEXT:    retq
+;
+; AVXNECONVERT-LABEL: fuse_v19bf16_load:
+; AVXNECONVERT:       # %bb.0: # %entry
+; AVXNECONVERT-NEXT:    movq %rdi, %rax
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm8
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm9
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm10
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm11
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm11[0]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm11
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm12[0],xmm9[1],xmm12[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm11
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm10 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero,xmm10[4],zero,xmm10[5],zero,xmm10[6],zero,xmm10[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm10, %ymm10
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm9 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero,xmm9[4],zero,xmm9[5],zero,xmm9[6],zero,xmm9[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm9, %ymm9
+; AVXNECONVERT-NEXT:    vmulps %ymm10, %ymm9, %ymm9
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm10
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm11
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm11
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklqdq {{.*#+}} xmm10 = xmm11[0],xmm10[0]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm11
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm12
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm13
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm13[0],xmm8[1],xmm13[1],xmm8[2],xmm13[2],xmm8[3],xmm13[3]
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
+; AVXNECONVERT-NEXT:    vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm11[0]
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm10 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero,xmm10[4],zero,xmm10[5],zero,xmm10[6],zero,xmm10[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm10, %ymm10
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm8 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm8, %ymm8
+; AVXNECONVERT-NEXT:    vmulps %ymm10, %ymm8, %ymm8
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm3
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm1
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm2
+; AVXNECONVERT-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm4
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm4
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm4
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm2
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm4
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm4
+; AVXNECONVERT-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm3
+; AVXNECONVERT-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm2
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm1
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm2
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],zero,zero
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm2
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm2
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm3
+; AVXNECONVERT-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],zero,zero
+; AVXNECONVERT-NEXT:    vpinsrw $0, {{[0-9]+}}(%rsp), %xmm0, %xmm3
+; AVXNECONVERT-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm8, %xmm3
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vmulps %ymm1, %ymm2, %ymm1
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vaddps %ymm2, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vaddps %ymm3, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm9, %xmm3
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm1, %xmm1
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm0, %xmm0
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm2, %xmm2
+; AVXNECONVERT-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vaddps %ymm3, %ymm2, %ymm2
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVXNECONVERT-NEXT:    vpslld $16, %ymm3, %ymm3
+; AVXNECONVERT-NEXT:    vaddps %ymm3, %ymm1, %ymm1
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm1, %xmm1
+; AVXNECONVERT-NEXT:    vpextrw $4, %xmm1, 56(%rdi)
+; AVXNECONVERT-NEXT:    vmovq %xmm1, 48(%rdi)
+; AVXNECONVERT-NEXT:    {vex} vcvtneps2bf16 %ymm2, %xmm1
+; AVXNECONVERT-NEXT:    vmovaps %xmm1, 32(%rdi)
+; AVXNECONVERT-NEXT:    vmovaps %ymm0, (%rdi)
+; AVXNECONVERT-NEXT:    vzeroupper
+; AVXNECONVERT-NEXT:    retq
 entry:
   %z = load <29 x bfloat>, ptr %p
   %m = fmul contract <29 x bfloat> %x, %y


