[Mlir-commits] [mlir] 87782b2 - [mlir][x86vector] AVX512-BF16 Dot op (#124800)

llvmlistbot at llvm.org
Wed Jan 29 04:07:45 PST 2025


Author: Adam Siemieniuk
Date: 2025-01-29T13:07:41+01:00
New Revision: 87782b216fd3e7a8f8b2de04d4af467b390e9a34

URL: https://github.com/llvm/llvm-project/commit/87782b216fd3e7a8f8b2de04d4af467b390e9a34
DIFF: https://github.com/llvm/llvm-project/commit/87782b216fd3e7a8f8b2de04d4af467b390e9a34.diff

LOG: [mlir][x86vector] AVX512-BF16 Dot op (#124800)

Adds an AVX512 bf16 dot-product operation and defines its lowering to LLVM
intrinsics.
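
For example, the 512-bit variant of the new op and the intrinsic op it
legalizes to (condensed from the tests added below):

```mlir
// High-level dialect op: `a` and `b` each hold 32 bf16 values, `src` holds
// the 16 f32 accumulators.
%0 = x86vector.avx512.dot %src, %a, %b : vector<32xbf16> -> vector<16xf32>

// After -convert-vector-to-llvm="enable-x86vector" it is rewritten to the
// intrinsic op:
%1 = "x86vector.avx512.intr.dpbf16ps.512"(%src, %a, %b)
  : (vector<16xf32>, vector<32xbf16>, vector<32xbf16>) -> vector<16xf32>
```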

The AVX512 intrinsic operation definition is extended with an optional
extension field that allows specifying the required LLVM mnemonic suffix,
e.g., `"bf16"` for `x86_avx512bf16_` intrinsics.

Added: 
    mlir/test/Dialect/X86Vector/dot-bf16.mlir

Modified: 
    mlir/include/mlir/Dialect/X86Vector/X86Vector.td
    mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
    mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir
    mlir/test/Dialect/X86Vector/roundtrip.mlir
    mlir/test/Target/LLVMIR/x86vector.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
index fa3f0ee0460b1d..16181d7e760db5 100644
--- a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
+++ b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td
@@ -35,17 +35,20 @@ class AVX512_Op<string mnemonic, list<Trait> traits = []> :
   Op<X86Vector_Dialect, "avx512." # mnemonic, traits> {}
 
 // Intrinsic operation used during lowering to LLVM IR.
-class AVX512_IntrOp<string mnemonic, int numResults, list<Trait> traits = []> :
+class AVX512_IntrOp<string mnemonic, int numResults,
+                    list<Trait> traits = [],
+                    string extension = ""> :
   LLVM_IntrOpBase<X86Vector_Dialect, "avx512.intr." # mnemonic,
-                  "x86_avx512_" # !subst(".", "_", mnemonic),
+                  !subst("EXT", extension, "x86_avx512EXT_") # !subst(".", "_", mnemonic),
                   [], [], traits, numResults>;
 
 // Defined by first result overload. May have to be extended for other
 // instructions in the future.
 class AVX512_IntrOverloadedOp<string mnemonic,
-                              list<Trait> traits = []> :
+                              list<Trait> traits = [],
+                              string extension = ""> :
   LLVM_IntrOpBase<X86Vector_Dialect, "avx512.intr." # mnemonic,
-                  "x86_avx512_" # !subst(".", "_", mnemonic),
+                  !subst("EXT", extension, "x86_avx512EXT_") # !subst(".", "_", mnemonic),
                   /*list<int> overloadedResults=*/[0],
                   /*list<int> overloadedOperands=*/[],
                   traits, /*numResults=*/1>;
@@ -271,6 +274,73 @@ def Vp2IntersectQIntrOp : AVX512_IntrOp<"vp2intersect.q.512", 2, [
                    VectorOfLengthAndType<[8], [I64]>:$b);
 }
 
+//----------------------------------------------------------------------------//
+// Dot BF16
+//----------------------------------------------------------------------------//
+
+def DotBF16Op : AVX512_Op<"dot", [Pure,
+  AllTypesMatch<["a", "b"]>,
+  AllTypesMatch<["src", "dst"]>,
+  TypesMatchWith<"`a` has twice as many elements as `src`",
+                 "src", "a",
+                 "VectorType::get({::llvm::cast<VectorType>($_self).getShape()[0] * 2}, "
+                 "BFloat16Type::get($_self.getContext()))">]> {
+  let summary = "Dot BF16 op";
+  let description = [{
+    The `dot` op is an AVX512-BF16 specific op that can lower to the proper
+    LLVMAVX512BF16 operation `llvm.dpbf16ps` depending on the width of MLIR
+    vectors it is applied to.
+
+    #### From the Intel Intrinsics Guide:
+
+    Compute dot-product of BF16 (16-bit) floating-point pairs in `a` and `b`,
+    accumulating the intermediate single-precision (32-bit) floating-point
+    elements with elements in `src`, and store the results in `dst`.
+
+    Example:
+    ```mlir
+    %0 = x86vector.avx512.dot %src, %a, %b : vector<32xbf16> -> vector<16xf32>
+    ```
+  }];
+  let arguments = (ins VectorOfLengthAndType<[4, 8, 16], [F32]>:$src,
+                   VectorOfLengthAndType<[8, 16, 32], [BF16]>:$a,
+                   VectorOfLengthAndType<[8, 16, 32], [BF16]>:$b
+                   );
+  let results = (outs VectorOfLengthAndType<[4, 8, 16], [F32]>:$dst);
+  let assemblyFormat =
+    "$src `,` $a `,` $b attr-dict `:` type($a) `->` type($src)";
+}
+
+def DotBF16Ps128IntrOp : AVX512_IntrOp<"dpbf16ps.128", 1, [Pure,
+    AllTypesMatch<["a", "b"]>,
+    AllTypesMatch<["src", "res"]>],
+    /*extension=*/"bf16"> {
+  let arguments = (ins VectorOfLengthAndType<[4], [F32]>:$src,
+                       VectorOfLengthAndType<[8], [BF16]>:$a,
+                       VectorOfLengthAndType<[8], [BF16]>:$b);
+  let results = (outs VectorOfLengthAndType<[4], [F32]>:$res);
+}
+
+def DotBF16Ps256IntrOp : AVX512_IntrOp<"dpbf16ps.256", 1, [Pure,
+    AllTypesMatch<["a", "b"]>,
+    AllTypesMatch<["src", "res"]>],
+    /*extension=*/"bf16"> {
+  let arguments = (ins VectorOfLengthAndType<[8], [F32]>:$src,
+                       VectorOfLengthAndType<[16], [BF16]>:$a,
+                       VectorOfLengthAndType<[16], [BF16]>:$b);
+  let results = (outs VectorOfLengthAndType<[8], [F32]>:$res);
+}
+
+def DotBF16Ps512IntrOp : AVX512_IntrOp<"dpbf16ps.512", 1, [Pure,
+    AllTypesMatch<["a", "b"]>,
+    AllTypesMatch<["src", "res"]>],
+    /*extension=*/"bf16"> {
+  let arguments = (ins VectorOfLengthAndType<[16], [F32]>:$src,
+                       VectorOfLengthAndType<[32], [BF16]>:$a,
+                       VectorOfLengthAndType<[32], [BF16]>:$b);
+  let results = (outs VectorOfLengthAndType<[16], [F32]>:$res);
+}
+
 //===----------------------------------------------------------------------===//
 // AVX op definitions
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
index e918473cae9e3a..260ac9ce589a38 100644
--- a/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
+++ b/mlir/lib/Dialect/X86Vector/Transforms/LegalizeForLLVMExport.cpp
@@ -90,6 +90,47 @@ struct MaskCompressOpConversion
   }
 };
 
+struct DotBF16OpConversion : public ConvertOpToLLVMPattern<DotBF16Op> {
+  using ConvertOpToLLVMPattern<DotBF16Op>::ConvertOpToLLVMPattern;
+
+  LogicalResult
+  matchAndRewrite(DotBF16Op op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto typeA = dyn_cast<VectorType>(op.getA().getType());
+    unsigned elemBitWidth = typeA.getElementTypeBitWidth();
+    unsigned opBitWidth = typeA.getShape()[0] * elemBitWidth;
+
+    auto opType = adaptor.getSrc().getType();
+    auto opSrc = adaptor.getSrc();
+    auto opA = adaptor.getA();
+    auto opB = adaptor.getB();
+
+    switch (opBitWidth) {
+    case 128: {
+      rewriter.replaceOpWithNewOp<DotBF16Ps128IntrOp>(op, opType, opSrc, opA,
+                                                      opB);
+      break;
+    }
+    case 256: {
+      rewriter.replaceOpWithNewOp<DotBF16Ps256IntrOp>(op, opType, opSrc, opA,
+                                                      opB);
+      break;
+    }
+    case 512: {
+      rewriter.replaceOpWithNewOp<DotBF16Ps512IntrOp>(op, opType, opSrc, opA,
+                                                      opB);
+      break;
+    }
+    default: {
+      return rewriter.notifyMatchFailure(op,
+                                         "unsupported AVX512-BF16 dot variant");
+    }
+    }
+
+    return success();
+  }
+};
+
 struct RsqrtOpConversion : public ConvertOpToLLVMPattern<RsqrtOp> {
   using ConvertOpToLLVMPattern<RsqrtOp>::ConvertOpToLLVMPattern;
 
@@ -161,8 +202,8 @@ using Registry = RegistryImpl<
 void mlir::populateX86VectorLegalizeForLLVMExportPatterns(
     const LLVMTypeConverter &converter, RewritePatternSet &patterns) {
   Registry::registerPatterns(converter, patterns);
-  patterns.add<MaskCompressOpConversion, RsqrtOpConversion, DotOpConversion>(
-      converter);
+  patterns.add<MaskCompressOpConversion, DotBF16OpConversion, RsqrtOpConversion,
+               DotOpConversion>(converter);
 }
 
 void mlir::configureX86VectorLegalizeForExportTarget(
@@ -170,6 +211,10 @@ void mlir::configureX86VectorLegalizeForExportTarget(
   Registry::configureTarget(target);
   target.addLegalOp<MaskCompressIntrOp>();
   target.addIllegalOp<MaskCompressOp>();
+  target.addLegalOp<DotBF16Ps128IntrOp>();
+  target.addLegalOp<DotBF16Ps256IntrOp>();
+  target.addLegalOp<DotBF16Ps512IntrOp>();
+  target.addIllegalOp<DotBF16Op>();
   target.addLegalOp<RsqrtIntrOp>();
   target.addIllegalOp<RsqrtOp>();
   target.addLegalOp<DotIntrOp>();

diff --git a/mlir/test/Dialect/X86Vector/dot-bf16.mlir b/mlir/test/Dialect/X86Vector/dot-bf16.mlir
new file mode 100644
index 00000000000000..8c7bf6490800e3
--- /dev/null
+++ b/mlir/test/Dialect/X86Vector/dot-bf16.mlir
@@ -0,0 +1,30 @@
+// RUN: mlir-opt %s \
+// RUN:   -convert-vector-to-llvm="enable-x86vector" -convert-to-llvm \
+// RUN:   -reconcile-unrealized-casts | \
+// RUN: mlir-translate --mlir-to-llvmir | \
+// RUN: llc -mcpu=sapphirerapids | \
+// RUN: FileCheck %s
+
+func.func @avx512bf16_dot_128(%src: vector<4xf32>, %a: vector<8xbf16>,
+    %b: vector<8xbf16>) -> vector<4xf32> {
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<8xbf16> -> vector<4xf32>
+  return %0 : vector<4xf32>
+}
+// CHECK-LABEL: avx512bf16_dot_128:
+// CHECK: vdpbf16ps{{.*}}%xmm
+
+func.func @avx512bf16_dot_256(%src: vector<8xf32>, %a: vector<16xbf16>,
+    %b: vector<16xbf16>) -> vector<8xf32> {
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<16xbf16> -> vector<8xf32>
+  return %0 : vector<8xf32>
+}
+// CHECK-LABEL: avx512bf16_dot_256:
+// CHECK: vdpbf16ps{{.*}}%ymm
+
+func.func @avx512bf16_dot_512(%src: vector<16xf32>, %a: vector<32xbf16>,
+    %b: vector<32xbf16>) -> vector<16xf32> {
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<32xbf16> -> vector<16xf32>
+  return %0 : vector<16xf32>
+}
+// CHECK-LABEL: avx512bf16_dot_512:
+// CHECK: vdpbf16ps{{.*}}%zmm

diff --git a/mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir b/mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir
index 8b9006395fdfe4..ed9177eaec9ce4 100644
--- a/mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir
+++ b/mlir/test/Dialect/X86Vector/legalize-for-llvm.mlir
@@ -43,6 +43,33 @@ func.func @avx512_vp2intersect(%a: vector<16xi32>, %b: vector<8xi64>)
   return %0, %1, %2, %3 : vector<16xi1>, vector<16xi1>, vector<8xi1>, vector<8xi1>
 }
 
+// CHECK-LABEL: func @avx512bf16_dot_128
+func.func @avx512bf16_dot_128(%src: vector<4xf32>, %a: vector<8xbf16>,
+  %b: vector<8xbf16>) -> (vector<4xf32>)
+{
+  // CHECK: x86vector.avx512.intr.dpbf16ps.128
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<8xbf16> -> vector<4xf32>
+  return %0 : vector<4xf32>
+}
+
+// CHECK-LABEL: func @avx512bf16_dot_256
+func.func @avx512bf16_dot_256(%src: vector<8xf32>, %a: vector<16xbf16>,
+  %b: vector<16xbf16>) -> (vector<8xf32>)
+{
+  // CHECK: x86vector.avx512.intr.dpbf16ps.256
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<16xbf16> -> vector<8xf32>
+  return %0 : vector<8xf32>
+}
+
+// CHECK-LABEL: func @avx512bf16_dot_512
+func.func @avx512bf16_dot_512(%src: vector<16xf32>, %a: vector<32xbf16>,
+  %b: vector<32xbf16>) -> (vector<16xf32>)
+{
+  // CHECK: x86vector.avx512.intr.dpbf16ps.512
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<32xbf16> -> vector<16xf32>
+  return %0 : vector<16xf32>
+}
+
 // CHECK-LABEL: func @avx_rsqrt
 func.func @avx_rsqrt(%a: vector<8xf32>) -> (vector<8xf32>)
 {

diff --git a/mlir/test/Dialect/X86Vector/roundtrip.mlir b/mlir/test/Dialect/X86Vector/roundtrip.mlir
index 557978b51c5123..cf74a7ee602558 100644
--- a/mlir/test/Dialect/X86Vector/roundtrip.mlir
+++ b/mlir/test/Dialect/X86Vector/roundtrip.mlir
@@ -47,6 +47,33 @@ func.func @avx512_mask_compress(%k1: vector<16xi1>, %a1: vector<16xf32>,
   return %0, %1, %2 : vector<16xf32>, vector<16xf32>, vector<8xi64>
 }
 
+// CHECK-LABEL: func @avx512bf16_dot_128
+func.func @avx512bf16_dot_128(%src: vector<4xf32>, %a: vector<8xbf16>,
+  %b: vector<8xbf16>) -> (vector<4xf32>)
+{
+  // CHECK: x86vector.avx512.dot {{.*}} : vector<8xbf16> -> vector<4xf32>
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<8xbf16> -> vector<4xf32>
+  return %0 : vector<4xf32>
+}
+
+// CHECK-LABEL: func @avx512bf16_dot_256
+func.func @avx512bf16_dot_256(%src: vector<8xf32>, %a: vector<16xbf16>,
+  %b: vector<16xbf16>) -> (vector<8xf32>)
+{
+  // CHECK: x86vector.avx512.dot {{.*}} : vector<16xbf16> -> vector<8xf32>
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<16xbf16> -> vector<8xf32>
+  return %0 : vector<8xf32>
+}
+
+// CHECK-LABEL: func @avx512bf16_dot_512
+func.func @avx512bf16_dot_512(%src: vector<16xf32>, %a: vector<32xbf16>,
+  %b: vector<32xbf16>) -> (vector<16xf32>)
+{
+  // CHECK: x86vector.avx512.dot {{.*}} : vector<32xbf16> -> vector<16xf32>
+  %0 = x86vector.avx512.dot %src, %a, %b : vector<32xbf16> -> vector<16xf32>
+  return %0 : vector<16xf32>
+}
+
 // CHECK-LABEL: func @avx_rsqrt
 func.func @avx_rsqrt(%a: vector<8xf32>) -> (vector<8xf32>)
 {

diff --git a/mlir/test/Target/LLVMIR/x86vector.mlir b/mlir/test/Target/LLVMIR/x86vector.mlir
index 190732868cb7ad..1df03f10c93214 100644
--- a/mlir/test/Target/LLVMIR/x86vector.mlir
+++ b/mlir/test/Target/LLVMIR/x86vector.mlir
@@ -60,6 +60,39 @@ llvm.func @LLVM_x86_vp2intersect_q_512(%a: vector<8xi64>, %b: vector<8xi64>)
   llvm.return %0 : !llvm.struct<(vector<8 x i1>, vector<8 x i1>)>
 }
 
+// CHECK-LABEL: define <4 x float> @LLVM_x86_avx512bf16_dpbf16ps_128
+llvm.func @LLVM_x86_avx512bf16_dpbf16ps_128(
+    %arg0: vector<4xf32>, %arg1: vector<8xbf16>, %arg2: vector<8xbf16>
+  ) -> vector<4xf32>
+{
+  // CHECK: call <4 x float> @llvm.x86.avx512bf16.dpbf16ps.128(
+  %0 = "x86vector.avx512.intr.dpbf16ps.128"(%arg0, %arg1, %arg2)
+    : (vector<4xf32>, vector<8xbf16>, vector<8xbf16>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// CHECK-LABEL: define <8 x float> @LLVM_x86_avx512bf16_dpbf16ps_256
+llvm.func @LLVM_x86_avx512bf16_dpbf16ps_256(
+    %arg0: vector<8xf32>, %arg1: vector<16xbf16>, %arg2: vector<16xbf16>
+  ) -> vector<8xf32>
+{
+  // CHECK: call <8 x float> @llvm.x86.avx512bf16.dpbf16ps.256(
+  %0 = "x86vector.avx512.intr.dpbf16ps.256"(%arg0, %arg1, %arg2)
+    : (vector<8xf32>, vector<16xbf16>, vector<16xbf16>) -> vector<8xf32>
+  llvm.return %0 : vector<8xf32>
+}
+
+// CHECK-LABEL: define <16 x float> @LLVM_x86_avx512bf16_dpbf16ps_512
+llvm.func @LLVM_x86_avx512bf16_dpbf16ps_512(
+    %arg0: vector<16xf32>, %arg1: vector<32xbf16>, %arg2: vector<32xbf16>
+  ) -> vector<16xf32>
+{
+  // CHECK: call <16 x float> @llvm.x86.avx512bf16.dpbf16ps.512(
+  %0 = "x86vector.avx512.intr.dpbf16ps.512"(%arg0, %arg1, %arg2)
+    : (vector<16xf32>, vector<32xbf16>, vector<32xbf16>) -> vector<16xf32>
+  llvm.return %0 : vector<16xf32>
+}
+
 // CHECK-LABEL: define <8 x float> @LLVM_x86_avx_rsqrt_ps_256
 llvm.func @LLVM_x86_avx_rsqrt_ps_256(%a: vector <8xf32>) -> vector<8xf32>
 {
@@ -67,3 +100,14 @@ llvm.func @LLVM_x86_avx_rsqrt_ps_256(%a: vector <8xf32>) -> vector<8xf32>
   %0 = "x86vector.avx.intr.rsqrt.ps.256"(%a) : (vector<8xf32>) -> (vector<8xf32>)
   llvm.return %0 : vector<8xf32>
 }
+
+// CHECK-LABEL: define <8 x float> @LLVM_x86_avx_dp_ps_256
+llvm.func @LLVM_x86_avx_dp_ps_256(
+    %arg0: vector<8xf32>, %arg1: vector<8xf32>
+  ) -> vector<8xf32>
+{
+  // CHECK: call <8 x float> @llvm.x86.avx.dp.ps.256(
+  %0 = llvm.mlir.constant(-1 : i8) : i8
+  %1 = "x86vector.avx.intr.dp.ps.256"(%arg0, %arg1, %0) : (vector<8xf32>, vector<8xf32>, i8) -> vector<8xf32>
+  llvm.return %1 : vector<8xf32>
+}

