[llvm] 96df79a - [X86] Support load/store for bf16 in avx

Xiang1 Zhang via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 15 22:39:45 PST 2023


Author: Xiang1 Zhang
Date: 2023-02-16T14:39:35+08:00
New Revision: 96df79af029b85616ab90e73143e0e8ae89c7b46

URL: https://github.com/llvm/llvm-project/commit/96df79af029b85616ab90e73143e0e8ae89c7b46
DIFF: https://github.com/llvm/llvm-project/commit/96df79af029b85616ab90e73143e0e8ae89c7b46.diff

LOG: [X86] Support load/store for bf16 in avx

Reviewed By: LuoYuanke
Differential Revision: https://reviews.llvm.org/D144163
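In short: each existing f16 vector load/store pattern under [HasAVX, NoVLX] gains a bf16 twin that selects the same VMOVAPS/VMOVUPS moves, which copy bits regardless of element type. As a minimal sketch of IR that exercises one of the new patterns (illustrative function name, mirroring the added test's RUN line):

    ; llc -mtriple=x86_64-unknown-unknown -mattr=+avx512bf16
    define <8 x bfloat> @load_unaligned(ptr %p) {
      ; align 1 matches loadv8bf16, so the unaligned move
      ; vmovups (%rdi), %xmm0 is selected.
      %v = load <8 x bfloat>, ptr %p, align 1
      ret <8 x bfloat> %v
    }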

Added: 
    llvm/test/CodeGen/X86/avx512bf16-mov.ll

Modified: 
    llvm/lib/Target/X86/X86InstrSSE.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 561ba99db4afb..f8660a9fa123a 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -577,20 +577,37 @@ let Predicates = [HasAVX, NoVLX] in {
 
   def : Pat<(alignedloadv8f16 addr:$src),
             (VMOVAPSrm addr:$src)>;
+  def : Pat<(alignedloadv8bf16 addr:$src),
+            (VMOVAPSrm addr:$src)>;
   def : Pat<(loadv8f16 addr:$src),
             (VMOVUPSrm addr:$src)>;
+  def : Pat<(loadv8bf16 addr:$src),
+            (VMOVUPSrm addr:$src)>;
   def : Pat<(alignedstore (v8f16 VR128:$src), addr:$dst),
             (VMOVAPSmr addr:$dst, VR128:$src)>;
+  def : Pat<(alignedstore (v8bf16 VR128:$src), addr:$dst),
+            (VMOVAPSmr addr:$dst, VR128:$src)>;
   def : Pat<(store (v8f16 VR128:$src), addr:$dst),
             (VMOVUPSmr addr:$dst, VR128:$src)>;
+  def : Pat<(store (v8bf16 VR128:$src), addr:$dst),
+            (VMOVUPSmr addr:$dst, VR128:$src)>;
+
   def : Pat<(alignedloadv16f16 addr:$src),
             (VMOVAPSYrm addr:$src)>;
+  def : Pat<(alignedloadv16bf16 addr:$src),
+            (VMOVAPSYrm addr:$src)>;
   def : Pat<(loadv16f16 addr:$src),
             (VMOVUPSYrm addr:$src)>;
+  def : Pat<(loadv16bf16 addr:$src),
+            (VMOVUPSYrm addr:$src)>;
   def : Pat<(alignedstore (v16f16 VR256:$src), addr:$dst),
             (VMOVAPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(alignedstore (v16bf16 VR256:$src), addr:$dst),
+            (VMOVAPSYmr addr:$dst, VR256:$src)>;
   def : Pat<(store (v16f16 VR256:$src), addr:$dst),
             (VMOVUPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(store (v16bf16 VR256:$src), addr:$dst),
+            (VMOVUPSYmr addr:$dst, VR256:$src)>;
 }
 
 // Use movaps / movups for SSE integer load / store (one byte shorter).

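A note on the hunk above: the [HasAVX, NoVLX] predicate means these patterns apply when the VEX-encoded AVX moves must be used because AVX-512VL is unavailable; with VLX, EVEX-encoded moves are selected by separate patterns. The aligned variants require alignment to the full vector width. A sketch of IR that hits the new 256-bit aligned pattern (illustrative name):

    ; alignedloadv16bf16 requires 32-byte alignment and selects
    ; VMOVAPSYrm, i.e. vmovaps (%rdi), %ymm0.
    define <16 x bfloat> @load_aligned_ymm(ptr %p) {
      %v = load <16 x bfloat>, ptr %p, align 32
      ret <16 x bfloat> %v
    }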
diff --git a/llvm/test/CodeGen/X86/avx512bf16-mov.ll b/llvm/test/CodeGen/X86/avx512bf16-mov.ll
new file mode 100644
index 0000000000000..da52c42a41600
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx512bf16-mov.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bf16 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512bf16 | FileCheck %s --check-prefix=X86
+
+define dso_local void @funbf16(ptr readonly %src, ptr writeonly %dst) {
+; X64-LABEL: funbf16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vmovups (%rdi), %xmm0
+; X64-NEXT:    vmovups %xmm0, (%rsi)
+; X64-NEXT:    vmovaps (%rdi), %xmm0
+; X64-NEXT:    vmovaps %xmm0, (%rsi)
+; X64-NEXT:    vmovups (%rdi), %ymm0
+; X64-NEXT:    vmovups %ymm0, (%rsi)
+; X64-NEXT:    vmovaps (%rdi), %ymm0
+; X64-NEXT:    vmovaps %ymm0, (%rsi)
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: funbf16:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vmovups (%ecx), %xmm0
+; X86-NEXT:    vmovups %xmm0, (%eax)
+; X86-NEXT:    vmovaps (%ecx), %xmm0
+; X86-NEXT:    vmovaps %xmm0, (%eax)
+; X86-NEXT:    vmovups (%ecx), %ymm0
+; X86-NEXT:    vmovups %ymm0, (%eax)
+; X86-NEXT:    vmovaps (%ecx), %ymm0
+; X86-NEXT:    vmovaps %ymm0, (%eax)
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
+entry:
+  %0 = load <8 x bfloat>, ptr %src, align 1
+  store <8 x bfloat> %0, ptr %dst, align 1
+  %1 = load <8 x bfloat>, ptr %src, align 32
+  store <8 x bfloat> %1, ptr %dst, align 32
+  %2 = load <16 x bfloat>, ptr %src, align 1
+  store <16 x bfloat> %2, ptr %dst, align 1
+  %3 = load <16 x bfloat>, ptr %src, align 32
+  store <16 x bfloat> %3, ptr %dst, align 32
+  ret void
+}
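Note that the test covers both alignments at both vector widths: the align 1 accesses select the unaligned vmovups forms, while the align 32 accesses select the aligned vmovaps forms, for XMM and YMM alike.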