[llvm-branch-commits] [llvm] a0de0b4 - [X86][BF16] Add test coverage for AVX-NE-CONVERT

Tobias Hieta via llvm-branch-commits llvm-branch-commits@lists.llvm.org
Thu Aug 31 23:27:38 PDT 2023


Author: Phoebe Wang
Date: 2023-09-01T08:24:05+02:00
New Revision: a0de0b440fa606805749dc7469a1b7dcae851eba

URL: https://github.com/llvm/llvm-project/commit/a0de0b440fa606805749dc7469a1b7dcae851eba
DIFF: https://github.com/llvm/llvm-project/commit/a0de0b440fa606805749dc7469a1b7dcae851eba.diff

LOG: [X86][BF16] Add test coverage for AVX-NE-CONVERT

Split from D158952.

(cherry picked from commit 30ec9473c6685d64d5caa17e2a6e8f4ccf275159)
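
For context, the small ISelLowering change below lets bf16 BUILD_VECTOR lowering also fire when the subtarget has AVX-NE-CONVERT (not only AVX512BF16), and the new RUN line covers that configuration. A minimal standalone illustration of what the added -mattr=avxneconvert run exercises, mirroring the addv case in the updated test (the function name here is illustrative only, not part of this commit):

    ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avxneconvert
    ; A bf16 vector fadd: without native bf16 arithmetic the add is scalarized
    ; through __truncsfbf2 libcalls, as the AVXNC CHECK lines below show.
    define <8 x bfloat> @sample_addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
      %add = fadd <8 x bfloat> %a, %b
      ret <8 x bfloat> %add
    }

The CHECK lines in the diff were autogenerated with utils/update_llc_test_checks.py, as noted at the top of the test file.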

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/bfloat.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ae9012055bbb93..7bcc181e2ca911 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -11360,7 +11360,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
   if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
     return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
 
-  if (VT.getVectorElementType() == MVT::bf16 && Subtarget.hasBF16())
+  if (VT.getVectorElementType() == MVT::bf16 &&
+      (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16()))
     return LowerBUILD_VECTORvXbf16(Op, DAG, Subtarget);
 
   if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))

diff --git a/llvm/test/CodeGen/X86/bfloat.ll b/llvm/test/CodeGen/X86/bfloat.ll
index dff4864537bfd2..6798adaf1e5f2e 100644
--- a/llvm/test/CodeGen/X86/bfloat.ll
+++ b/llvm/test/CodeGen/X86/bfloat.ll
@@ -1,7 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,SSE2
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512vl | FileCheck %s --check-prefixes=CHECK,F16,BF16
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512fp16,avx512vl | FileCheck %s --check-prefixes=CHECK,F16,FP16
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,F16,BF16
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512fp16,avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,F16,FP16
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avxneconvert | FileCheck %s --check-prefixes=CHECK,AVX,AVXNC
 
 define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind {
 ; SSE2-LABEL: add:
@@ -21,22 +22,22 @@ define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind {
 ; SSE2-NEXT:    popq %rbx
 ; SSE2-NEXT:    retq
 ;
-; F16-LABEL: add:
-; F16:       # %bb.0:
-; F16-NEXT:    pushq %rbx
-; F16-NEXT:    movq %rdx, %rbx
-; F16-NEXT:    movzwl (%rsi), %eax
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm0
-; F16-NEXT:    movzwl (%rdi), %eax
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm1
-; F16-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; F16-NEXT:    callq __truncsfbf2@PLT
-; F16-NEXT:    vmovd %xmm0, %eax
-; F16-NEXT:    movw %ax, (%rbx)
-; F16-NEXT:    popq %rbx
-; F16-NEXT:    retq
+; AVX-LABEL: add:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movq %rdx, %rbx
+; AVX-NEXT:    movzwl (%rsi), %eax
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    movzwl (%rdi), %eax
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm1
+; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    callq __truncsfbf2@PLT
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    movw %ax, (%rbx)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    retq
   %a = load bfloat, ptr %pa
   %b = load bfloat, ptr %pb
   %add = fadd bfloat %a, %b
@@ -59,19 +60,19 @@ define bfloat @add2(bfloat %a, bfloat %b) nounwind {
 ; SSE2-NEXT:    popq %rax
 ; SSE2-NEXT:    retq
 ;
-; F16-LABEL: add2:
-; F16:       # %bb.0:
-; F16-NEXT:    pushq %rax
-; F16-NEXT:    vmovd %xmm0, %eax
-; F16-NEXT:    vmovd %xmm1, %ecx
-; F16-NEXT:    shll $16, %ecx
-; F16-NEXT:    vmovd %ecx, %xmm0
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm1
-; F16-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; F16-NEXT:    callq __truncsfbf2@PLT
-; F16-NEXT:    popq %rax
-; F16-NEXT:    retq
+; AVX-LABEL: add2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rax
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    vmovd %xmm1, %ecx
+; AVX-NEXT:    shll $16, %ecx
+; AVX-NEXT:    vmovd %ecx, %xmm0
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm1
+; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    callq __truncsfbf2@PLT
+; AVX-NEXT:    popq %rax
+; AVX-NEXT:    retq
   %add = fadd bfloat %a, %b
   ret bfloat %add
 }
@@ -106,34 +107,34 @@ define void @add_double(ptr %pa, ptr %pb, ptr %pc) nounwind {
 ; SSE2-NEXT:    popq %rbp
 ; SSE2-NEXT:    retq
 ;
-; F16-LABEL: add_double:
-; F16:       # %bb.0:
-; F16-NEXT:    pushq %rbp
-; F16-NEXT:    pushq %r14
-; F16-NEXT:    pushq %rbx
-; F16-NEXT:    movq %rdx, %rbx
-; F16-NEXT:    movq %rsi, %r14
-; F16-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; F16-NEXT:    callq __truncdfbf2@PLT
-; F16-NEXT:    vmovd %xmm0, %ebp
-; F16-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; F16-NEXT:    callq __truncdfbf2@PLT
-; F16-NEXT:    vmovd %xmm0, %eax
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm0
-; F16-NEXT:    shll $16, %ebp
-; F16-NEXT:    vmovd %ebp, %xmm1
-; F16-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; F16-NEXT:    callq __truncsfbf2@PLT
-; F16-NEXT:    vmovd %xmm0, %eax
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm0
-; F16-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; F16-NEXT:    vmovsd %xmm0, (%rbx)
-; F16-NEXT:    popq %rbx
-; F16-NEXT:    popq %r14
-; F16-NEXT:    popq %rbp
-; F16-NEXT:    retq
+; AVX-LABEL: add_double:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movq %rdx, %rbx
+; AVX-NEXT:    movq %rsi, %r14
+; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    callq __truncdfbf2@PLT
+; AVX-NEXT:    vmovd %xmm0, %ebp
+; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    callq __truncdfbf2@PLT
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    shll $16, %ebp
+; AVX-NEXT:    vmovd %ebp, %xmm1
+; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    callq __truncsfbf2@PLT
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovsd %xmm0, (%rbx)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %rbp
+; AVX-NEXT:    retq
   %la = load double, ptr %pa
   %a = fptrunc double %la to bfloat
   %lb = load double, ptr %pb
@@ -170,30 +171,30 @@ define double @add_double2(double %da, double %db) nounwind {
 ; SSE2-NEXT:    popq %rbx
 ; SSE2-NEXT:    retq
 ;
-; F16-LABEL: add_double2:
-; F16:       # %bb.0:
-; F16-NEXT:    pushq %rbx
-; F16-NEXT:    subq $16, %rsp
-; F16-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; F16-NEXT:    callq __truncdfbf2@PLT
-; F16-NEXT:    vmovd %xmm0, %ebx
-; F16-NEXT:    vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
-; F16-NEXT:    # xmm0 = mem[0],zero
-; F16-NEXT:    callq __truncdfbf2@PLT
-; F16-NEXT:    vmovd %xmm0, %eax
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm0
-; F16-NEXT:    shll $16, %ebx
-; F16-NEXT:    vmovd %ebx, %xmm1
-; F16-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; F16-NEXT:    callq __truncsfbf2@PLT
-; F16-NEXT:    vmovd %xmm0, %eax
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm0
-; F16-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; F16-NEXT:    addq $16, %rsp
-; F16-NEXT:    popq %rbx
-; F16-NEXT:    retq
+; AVX-LABEL: add_double2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    subq $16, %rsp
+; AVX-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX-NEXT:    callq __truncdfbf2@PLT
+; AVX-NEXT:    vmovd %xmm0, %ebx
+; AVX-NEXT:    vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
+; AVX-NEXT:    # xmm0 = mem[0],zero
+; AVX-NEXT:    callq __truncdfbf2@PLT
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    shll $16, %ebx
+; AVX-NEXT:    vmovd %ebx, %xmm1
+; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    callq __truncsfbf2@PLT
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    addq $16, %rsp
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    retq
   %a = fptrunc double %da to bfloat
   %b = fptrunc double %db to bfloat
   %add = fadd bfloat %a, %b
@@ -216,19 +217,19 @@ define void @add_constant(ptr %pa, ptr %pc) nounwind {
 ; SSE2-NEXT:    popq %rbx
 ; SSE2-NEXT:    retq
 ;
-; F16-LABEL: add_constant:
-; F16:       # %bb.0:
-; F16-NEXT:    pushq %rbx
-; F16-NEXT:    movq %rsi, %rbx
-; F16-NEXT:    movzwl (%rdi), %eax
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm0
-; F16-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; F16-NEXT:    callq __truncsfbf2@PLT
-; F16-NEXT:    vmovd %xmm0, %eax
-; F16-NEXT:    movw %ax, (%rbx)
-; F16-NEXT:    popq %rbx
-; F16-NEXT:    retq
+; AVX-LABEL: add_constant:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movq %rsi, %rbx
+; AVX-NEXT:    movzwl (%rdi), %eax
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    callq __truncsfbf2@PLT
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    movw %ax, (%rbx)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    retq
   %a = load bfloat, ptr %pa
   %add = fadd bfloat %a, 1.0
   store bfloat %add, ptr %pc
@@ -247,16 +248,16 @@ define bfloat @add_constant2(bfloat %a) nounwind {
 ; SSE2-NEXT:    popq %rax
 ; SSE2-NEXT:    retq
 ;
-; F16-LABEL: add_constant2:
-; F16:       # %bb.0:
-; F16-NEXT:    pushq %rax
-; F16-NEXT:    vmovd %xmm0, %eax
-; F16-NEXT:    shll $16, %eax
-; F16-NEXT:    vmovd %eax, %xmm0
-; F16-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; F16-NEXT:    callq __truncsfbf2@PLT
-; F16-NEXT:    popq %rax
-; F16-NEXT:    retq
+; AVX-LABEL: add_constant2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rax
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    shll $16, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    callq __truncsfbf2@PLT
+; AVX-NEXT:    popq %rax
+; AVX-NEXT:    retq
   %add = fadd bfloat %a, 1.0
   ret bfloat %add
 }
@@ -656,6 +657,120 @@ define <8 x bfloat> @addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind {
 ; FP16-NEXT:    popq %r15
 ; FP16-NEXT:    popq %rbp
 ; FP16-NEXT:    retq
+;
+; AVXNC-LABEL: addv:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    pushq %rbp
+; AVXNC-NEXT:    pushq %r15
+; AVXNC-NEXT:    pushq %r14
+; AVXNC-NEXT:    pushq %r13
+; AVXNC-NEXT:    pushq %r12
+; AVXNC-NEXT:    pushq %rbx
+; AVXNC-NEXT:    subq $40, %rsp
+; AVXNC-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; AVXNC-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVXNC-NEXT:    vpextrw $7, %xmm1, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vaddss %xmm2, %xmm1, %xmm0
+; AVXNC-NEXT:    callq __truncsfbf2@PLT
+; AVXNC-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVXNC-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $6, %xmm1, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVXNC-NEXT:    callq __truncsfbf2@PLT
+; AVXNC-NEXT:    vmovd %xmm0, %ebp
+; AVXNC-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $5, %xmm1, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVXNC-NEXT:    callq __truncsfbf2@PLT
+; AVXNC-NEXT:    vmovd %xmm0, %r14d
+; AVXNC-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $4, %xmm1, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVXNC-NEXT:    callq __truncsfbf2@PLT
+; AVXNC-NEXT:    vmovd %xmm0, %r15d
+; AVXNC-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $3, %xmm1, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVXNC-NEXT:    callq __truncsfbf2@PLT
+; AVXNC-NEXT:    vmovd %xmm0, %r12d
+; AVXNC-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $2, %xmm1, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVXNC-NEXT:    callq __truncsfbf2@PLT
+; AVXNC-NEXT:    vmovd %xmm0, %r13d
+; AVXNC-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVXNC-NEXT:    vpextrw $1, %xmm1, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVXNC-NEXT:    callq __truncsfbf2@PLT
+; AVXNC-NEXT:    vmovd %xmm0, %ebx
+; AVXNC-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVXNC-NEXT:    vmovd %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVXNC-NEXT:    vmovd %xmm1, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVXNC-NEXT:    callq __truncsfbf2@PLT
+; AVXNC-NEXT:    vmovd %xmm0, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vpinsrw $1, %ebx, %xmm0, %xmm0
+; AVXNC-NEXT:    vpinsrw $2, %r13d, %xmm0, %xmm0
+; AVXNC-NEXT:    vpinsrw $3, %r12d, %xmm0, %xmm0
+; AVXNC-NEXT:    vpinsrw $4, %r15d, %xmm0, %xmm0
+; AVXNC-NEXT:    vpinsrw $5, %r14d, %xmm0, %xmm0
+; AVXNC-NEXT:    vpinsrw $6, %ebp, %xmm0, %xmm0
+; AVXNC-NEXT:    vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVXNC-NEXT:    addq $40, %rsp
+; AVXNC-NEXT:    popq %rbx
+; AVXNC-NEXT:    popq %r12
+; AVXNC-NEXT:    popq %r13
+; AVXNC-NEXT:    popq %r14
+; AVXNC-NEXT:    popq %r15
+; AVXNC-NEXT:    popq %rbp
+; AVXNC-NEXT:    retq
   %add = fadd <8 x bfloat> %a, %b
   ret <8 x bfloat> %add
 }
@@ -677,6 +792,19 @@ define <2 x bfloat> @pr62997(bfloat %a, bfloat %b) {
 ; F16-NEXT:    vmovd %ecx, %xmm0
 ; F16-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
 ; F16-NEXT:    retq
+;
+; AVXNC-LABEL: pr62997:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    vmovd %xmm1, %eax
+; AVXNC-NEXT:    vmovd %xmm0, %ecx
+; AVXNC-NEXT:    vmovd %ecx, %xmm0
+; AVXNC-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0
+; AVXNC-NEXT:    vmovd %xmm0, %eax
+; AVXNC-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; AVXNC-NEXT:    shrl $16, %eax
+; AVXNC-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; AVXNC-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVXNC-NEXT:    retq
   %1 = insertelement <2 x bfloat> undef, bfloat %a, i64 0
   %2 = insertelement <2 x bfloat> %1, bfloat %b, i64 1
   ret <2 x bfloat> %2
@@ -695,6 +823,12 @@ define <32 x bfloat> @pr63017() {
 ; F16:       # %bb.0:
 ; F16-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; F16-NEXT:    retq
+;
+; AVXNC-LABEL: pr63017:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVXNC-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVXNC-NEXT:    retq
   ret <32 x bfloat> zeroinitializer
 }
 
@@ -1270,6 +1404,256 @@ define <32 x bfloat> @pr63017_2() nounwind {
 ; F16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
 ; F16-NEXT:    vmovdqu16 (%rax), %zmm0 {%k1}
 ; F16-NEXT:    retq
+;
+; AVXNC-LABEL: pr63017_2:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_1
+; AVXNC-NEXT:  # %bb.2: # %cond.load
+; AVXNC-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
+; AVXNC-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [49024,49024,49024,49024,49024,49024,49024,49024]
+; AVXNC-NEXT:    vpinsrw $0, (%rax), %xmm0, %xmm0
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVXNC-NEXT:    jmp .LBB12_3
+; AVXNC-NEXT:  .LBB12_1:
+; AVXNC-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024]
+; AVXNC-NEXT:    vmovdqa %ymm0, %ymm1
+; AVXNC-NEXT:  .LBB12_3: # %else
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_5
+; AVXNC-NEXT:  # %bb.4: # %cond.load1
+; AVXNC-NEXT:    vpinsrw $1, (%rax), %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_5: # %else2
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_7
+; AVXNC-NEXT:  # %bb.6: # %cond.load4
+; AVXNC-NEXT:    vpinsrw $2, (%rax), %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_7: # %else5
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_9
+; AVXNC-NEXT:  # %bb.8: # %cond.load7
+; AVXNC-NEXT:    vpinsrw $3, (%rax), %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_9: # %else8
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_11
+; AVXNC-NEXT:  # %bb.10: # %cond.load10
+; AVXNC-NEXT:    vpinsrw $4, (%rax), %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_11: # %else11
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_13
+; AVXNC-NEXT:  # %bb.12: # %cond.load13
+; AVXNC-NEXT:    vpinsrw $5, (%rax), %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_13: # %else14
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_15
+; AVXNC-NEXT:  # %bb.14: # %cond.load16
+; AVXNC-NEXT:    vpinsrw $6, (%rax), %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_15: # %else17
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_17
+; AVXNC-NEXT:  # %bb.16: # %cond.load19
+; AVXNC-NEXT:    vpinsrw $7, (%rax), %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_17: # %else20
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_19
+; AVXNC-NEXT:  # %bb.18: # %cond.load22
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_19: # %else23
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_21
+; AVXNC-NEXT:  # %bb.20: # %cond.load25
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_21: # %else26
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_23
+; AVXNC-NEXT:  # %bb.22: # %cond.load28
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3,4,5,6,7,8,9],ymm2[10],ymm0[11,12,13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_23: # %else29
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_25
+; AVXNC-NEXT:  # %bb.24: # %cond.load31
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3],ymm0[4,5,6,7,8,9,10],ymm2[11],ymm0[12,13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_25: # %else32
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_27
+; AVXNC-NEXT:  # %bb.26: # %cond.load34
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4],ymm0[5,6,7,8,9,10,11],ymm2[12],ymm0[13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_27: # %else35
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_29
+; AVXNC-NEXT:  # %bb.28: # %cond.load37
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7,8,9,10,11,12],ymm2[13],ymm0[14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_29: # %else38
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_31
+; AVXNC-NEXT:  # %bb.30: # %cond.load40
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6],ymm0[7,8,9,10,11,12,13],ymm2[14],ymm0[15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_31: # %else41
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_33
+; AVXNC-NEXT:  # %bb.32: # %cond.load43
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5,6],ymm2[7],ymm0[8,9,10,11,12,13,14],ymm2[15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_33: # %else44
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_35
+; AVXNC-NEXT:  # %bb.34: # %cond.load46
+; AVXNC-NEXT:    vpinsrw $0, (%rax), %xmm1, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_35: # %else47
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_37
+; AVXNC-NEXT:  # %bb.36: # %cond.load49
+; AVXNC-NEXT:    vpinsrw $1, (%rax), %xmm1, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_37: # %else50
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_39
+; AVXNC-NEXT:  # %bb.38: # %cond.load52
+; AVXNC-NEXT:    vpinsrw $2, (%rax), %xmm1, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_39: # %else53
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_41
+; AVXNC-NEXT:  # %bb.40: # %cond.load55
+; AVXNC-NEXT:    vpinsrw $3, (%rax), %xmm1, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_41: # %else56
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_43
+; AVXNC-NEXT:  # %bb.42: # %cond.load58
+; AVXNC-NEXT:    vpinsrw $4, (%rax), %xmm1, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_43: # %else59
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_45
+; AVXNC-NEXT:  # %bb.44: # %cond.load61
+; AVXNC-NEXT:    vpinsrw $5, (%rax), %xmm1, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_45: # %else62
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_47
+; AVXNC-NEXT:  # %bb.46: # %cond.load64
+; AVXNC-NEXT:    vpinsrw $6, (%rax), %xmm1, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_47: # %else65
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_49
+; AVXNC-NEXT:  # %bb.48: # %cond.load67
+; AVXNC-NEXT:    vpinsrw $7, (%rax), %xmm1, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_49: # %else68
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_51
+; AVXNC-NEXT:  # %bb.50: # %cond.load70
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_51: # %else71
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_53
+; AVXNC-NEXT:  # %bb.52: # %cond.load73
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7,8],ymm2[9],ymm1[10,11,12,13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_53: # %else74
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_55
+; AVXNC-NEXT:  # %bb.54: # %cond.load76
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6,7,8,9],ymm2[10],ymm1[11,12,13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_55: # %else77
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_57
+; AVXNC-NEXT:  # %bb.56: # %cond.load79
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7,8,9,10],ymm2[11],ymm1[12,13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_57: # %else80
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_59
+; AVXNC-NEXT:  # %bb.58: # %cond.load82
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4],ymm1[5,6,7,8,9,10,11],ymm2[12],ymm1[13,14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_59: # %else83
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_61
+; AVXNC-NEXT:  # %bb.60: # %cond.load85
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7,8,9,10,11,12],ymm2[13],ymm1[14,15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_61: # %else86
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_63
+; AVXNC-NEXT:  # %bb.62: # %cond.load88
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7,8,9,10,11,12,13],ymm2[14],ymm1[15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_63: # %else89
+; AVXNC-NEXT:    xorl %eax, %eax
+; AVXNC-NEXT:    testb %al, %al
+; AVXNC-NEXT:    jne .LBB12_65
+; AVXNC-NEXT:  # %bb.64: # %cond.load91
+; AVXNC-NEXT:    vpbroadcastw (%rax), %ymm2
+; AVXNC-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5,6],ymm2[7],ymm1[8,9,10,11,12,13,14],ymm2[15]
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVXNC-NEXT:  .LBB12_65: # %else92
+; AVXNC-NEXT:    retq
   %1 = call <32 x bfloat> @llvm.masked.load.v32bf16.p0(ptr poison, i32 2, <32 x i1> poison, <32 x bfloat> <bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80>)
   ret <32 x bfloat> %1
 }
@@ -1295,6 +1679,13 @@ define <32 x bfloat> @pr62997_3(<32 x bfloat> %0, bfloat %1) {
 ; F16-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm1
 ; F16-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
 ; F16-NEXT:    retq
+;
+; AVXNC-LABEL: pr62997_3:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    vmovd %xmm2, %eax
+; AVXNC-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm2
+; AVXNC-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVXNC-NEXT:    retq
   %3 = insertelement <32 x bfloat> %0, bfloat %1, i64 1
   ret <32 x bfloat> %3
 }
@@ -1328,6 +1719,25 @@ define <4 x float> @pr64460_1(<4 x bfloat> %a) {
 ; F16-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; F16-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; F16-NEXT:    retq
+;
+; AVXNC-LABEL: pr64460_1:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vmovd %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVXNC-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVXNC-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVXNC-NEXT:    retq
   %b = fpext <4 x bfloat> %a to <4 x float>
   ret <4 x float> %b
 }
@@ -1377,6 +1787,41 @@ define <8 x float> @pr64460_2(<8 x bfloat> %a) {
 ; F16-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; F16-NEXT:    vpslld $16, %ymm0, %ymm0
 ; F16-NEXT:    retq
+;
+; AVXNC-LABEL: pr64460_2:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVXNC-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVXNC-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; AVXNC-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vmovd %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVXNC-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVXNC-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVXNC-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVXNC-NEXT:    retq
   %b = fpext <8 x bfloat> %a to <8 x float>
   ret <8 x float> %b
 }
@@ -1461,6 +1906,74 @@ define <16 x float> @pr64460_3(<16 x bfloat> %a) {
 ; F16-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; F16-NEXT:    vpslld $16, %zmm0, %zmm0
 ; F16-NEXT:    retq
+;
+; AVXNC-LABEL: pr64460_3:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVXNC-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVXNC-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; AVXNC-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vmovd %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVXNC-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVXNC-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
+; AVXNC-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm2
+; AVXNC-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVXNC-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
+; AVXNC-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVXNC-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
+; AVXNC-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vmovd %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm4
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVXNC-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm4
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVXNC-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
+; AVXNC-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVXNC-NEXT:    vmovaps %ymm2, %ymm0
+; AVXNC-NEXT:    retq
   %b = fpext <16 x bfloat> %a to <16 x float>
   ret <16 x float> %b
 }
@@ -1517,6 +2030,49 @@ define <8 x double> @pr64460_4(<8 x bfloat> %a) {
 ; F16-NEXT:    vpslld $16, %ymm0, %ymm0
 ; F16-NEXT:    vcvtps2pd %ymm0, %zmm0
 ; F16-NEXT:    retq
+;
+; AVXNC-LABEL: pr64460_4:
+; AVXNC:       # %bb.0:
+; AVXNC-NEXT:    vpextrw $3, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
+; AVXNC-NEXT:    vpextrw $2, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
+; AVXNC-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVXNC-NEXT:    vpextrw $1, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm2
+; AVXNC-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
+; AVXNC-NEXT:    vmovd %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
+; AVXNC-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVXNC-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm2
+; AVXNC-NEXT:    vpextrw $7, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm1
+; AVXNC-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
+; AVXNC-NEXT:    vpextrw $6, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
+; AVXNC-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVXNC-NEXT:    vpextrw $5, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm3
+; AVXNC-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
+; AVXNC-NEXT:    vpextrw $4, %xmm0, %eax
+; AVXNC-NEXT:    shll $16, %eax
+; AVXNC-NEXT:    vmovd %eax, %xmm0
+; AVXNC-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVXNC-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; AVXNC-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVXNC-NEXT:    vmovaps %ymm2, %ymm0
+; AVXNC-NEXT:    retq
   %b = fpext <8 x bfloat> %a to <8 x double>
   ret <8 x double> %b
 }
