[Mlir-commits] [mlir] ea8ed5c - [mlir][sparse] Add F16 and BF16.

llvmlistbot at llvm.org llvmlistbot at llvm.org
Wed Jun 8 09:51:09 PDT 2022


Author: bixia1
Date: 2022-06-08T09:51:05-07:00
New Revision: ea8ed5cbcfac973e28c9a24603395e5327a738f5

URL: https://github.com/llvm/llvm-project/commit/ea8ed5cbcfac973e28c9a24603395e5327a738f5
DIFF: https://github.com/llvm/llvm-project/commit/ea8ed5cbcfac973e28c9a24603395e5327a738f5.diff

LOG: [mlir][sparse] Add F16 and BF16.

This is the first PR to add `F16` and `BF16` support to the sparse codegen. There are still problems in supporting these two data types; for example, `BF16` is not quite working yet.

Add test cases.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D127010

Added: 
    mlir/include/mlir/ExecutionEngine/Float16bits.h
    mlir/lib/ExecutionEngine/Float16bits.cpp
    mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
    mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir

Modified: 
    mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
    mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
    mlir/lib/ExecutionEngine/CMakeLists.txt
    mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
    mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
    utils/bazel/llvm-project-overlay/mlir/BUILD.bazel

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/ExecutionEngine/Float16bits.h b/mlir/include/mlir/ExecutionEngine/Float16bits.h
new file mode 100644
index 0000000000000..7362f665ea656
--- /dev/null
+++ b/mlir/include/mlir/ExecutionEngine/Float16bits.h
@@ -0,0 +1,39 @@
+//===--- Float16bits.h - supports 2-byte floats ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements f16 and bf16 to support the compilation and execution
+// of programs using these types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_EXECUTIONENGINE_FLOAT16BITS_H_
+#define MLIR_EXECUTIONENGINE_FLOAT16BITS_H_
+
+#include <cstdint>
+#include <iostream>
+
+// Implements half precision and bfloat with f16 and bf16, using the MLIR type
+// names. These data types are also used for c-interface runtime routines.
+extern "C" {
+struct f16 {
+  f16(float f = 0);
+  uint16_t bits;
+};
+
+struct bf16 {
+  bf16(float f = 0);
+  uint16_t bits;
+};
+}
+
+// Outputs a half precision value.
+std::ostream &operator<<(std::ostream &os, const f16 &f);
+// Outputs a bfloat value.
+std::ostream &operator<<(std::ostream &os, const bf16 &d);
+
+#endif // MLIR_EXECUTIONENGINE_FLOAT16BITS_H_

diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
index 0b51bc2a354f2..7f2bbae0e71bb 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
@@ -15,6 +15,7 @@
 #define MLIR_EXECUTIONENGINE_SPARSETENSORUTILS_H_
 
 #include "mlir/ExecutionEngine/CRunnerUtils.h"
+#include "mlir/ExecutionEngine/Float16bits.h"
 
 #include <cinttypes>
 #include <complex>
@@ -77,12 +78,14 @@ using complex32 = std::complex<float>;
 enum class PrimaryType : uint32_t {
   kF64 = 1,
   kF32 = 2,
-  kI64 = 3,
-  kI32 = 4,
-  kI16 = 5,
-  kI8 = 6,
-  kC64 = 7,
-  kC32 = 8
+  kF16 = 3,
+  kBF16 = 4,
+  kI64 = 5,
+  kI32 = 6,
+  kI16 = 7,
+  kI8 = 8,
+  kC64 = 9,
+  kC32 = 10
 };
 
 // This x-macro only specifies the non-complex `V` types, because the ABI
@@ -97,6 +100,8 @@ enum class PrimaryType : uint32_t {
 #define FOREVERY_SIMPLEX_V(DO)                                                 \
   DO(F64, double)                                                              \
   DO(F32, float)                                                               \
+  DO(F16, f16)                                                                 \
+  DO(BF16, bf16)                                                               \
   DO(I64, int64_t)                                                             \
   DO(I32, int32_t)                                                             \
   DO(I16, int16_t)                                                             \

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 40d4b756a10b8..5533d02ceb0a1 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -104,6 +104,10 @@ PrimaryType mlir::sparse_tensor::primaryTypeEncoding(Type elemTp) {
     return PrimaryType::kF64;
   if (elemTp.isF32())
     return PrimaryType::kF32;
+  if (elemTp.isF16())
+    return PrimaryType::kF16;
+  if (elemTp.isBF16())
+    return PrimaryType::kBF16;
   if (elemTp.isInteger(64))
     return PrimaryType::kI64;
   if (elemTp.isInteger(32))

diff --git a/mlir/lib/ExecutionEngine/CMakeLists.txt b/mlir/lib/ExecutionEngine/CMakeLists.txt
index 76c4d3ddf8ed1..ae384551429dc 100644
--- a/mlir/lib/ExecutionEngine/CMakeLists.txt
+++ b/mlir/lib/ExecutionEngine/CMakeLists.txt
@@ -7,6 +7,7 @@ set(LLVM_OPTIONAL_SOURCES
   CudaRuntimeWrappers.cpp
   SparseTensorUtils.cpp
   ExecutionEngine.cpp
+  Float16bits.cpp
   RocmRuntimeWrappers.cpp
   RunnerUtils.cpp
   OptUtils.cpp
@@ -121,6 +122,7 @@ add_mlir_library(MLIRJitRunner
 add_mlir_library(mlir_c_runner_utils
   SHARED
   CRunnerUtils.cpp
+  Float16bits.cpp
   SparseTensorUtils.cpp
 
   EXCLUDE_FROM_LIBMLIR

diff --git a/mlir/lib/ExecutionEngine/Float16bits.cpp b/mlir/lib/ExecutionEngine/Float16bits.cpp
new file mode 100644
index 0000000000000..f9f2bbc7333c8
--- /dev/null
+++ b/mlir/lib/ExecutionEngine/Float16bits.cpp
@@ -0,0 +1,143 @@
+//===--- Float16bits.cpp - supports 2-byte floats  ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements f16 and bf16 to support the compilation and execution
+// of programs using these types.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/ExecutionEngine/Float16bits.h"
+
+namespace {
+
+// Union used to make the int/float aliasing explicit so we can access the raw
+// bits.
+union Float32Bits {
+  uint32_t u;
+  float f;
+};
+
+const uint32_t kF32MantiBits = 23;
+const uint32_t kF32HalfMantiBitDiff = 13;
+const uint32_t kF32HalfBitDiff = 16;
+const Float32Bits kF32Magic = {113 << kF32MantiBits};
+const uint32_t kF32HalfExpAdjust = (127 - 15) << kF32MantiBits;
+
+// Constructs the 16 bit representation for a half precision value from a float
+// value. This implementation is adapted from Eigen.
+uint16_t float2half(float floatValue) {
+  const Float32Bits inf = {255 << kF32MantiBits};
+  const Float32Bits f16max = {(127 + 16) << kF32MantiBits};
+  const Float32Bits denormMagic = {((127 - 15) + (kF32MantiBits - 10) + 1)
+                                   << kF32MantiBits};
+  uint32_t signMask = 0x80000000u;
+  uint16_t halfValue = static_cast<uint16_t>(0x0u);
+  Float32Bits f;
+  f.f = floatValue;
+  uint32_t sign = f.u & signMask;
+  f.u ^= sign;
+
+  if (f.u >= f16max.u) {
+    const uint32_t halfQnan = 0x7e00;
+    const uint32_t halfInf = 0x7c00;
+    // Inf or NaN (all exponent bits set).
+    halfValue = (f.u > inf.u) ? halfQnan : halfInf; // NaN->qNaN and Inf->Inf
+  } else {
+    // (De)normalized number or zero.
+    if (f.u < kF32Magic.u) {
+      // The resulting FP16 is subnormal or zero.
+      //
+      // Use a magic value to align our 10 mantissa bits at the bottom of the
+      // float. As long as FP addition is round-to-nearest-even this works.
+      f.f += denormMagic.f;
+
+      halfValue = static_cast<uint16_t>(f.u - denormMagic.u);
+    } else {
+      uint32_t mantOdd =
+          (f.u >> kF32HalfMantiBitDiff) & 1; // Resulting mantissa is odd.
+
+      // Update exponent, rounding bias part 1. The following expressions are
+      // equivalent to `f.u += ((unsigned int)(15 - 127) << kF32MantiBits) +
+      // 0xfff`, but without arithmetic overflow.
+      f.u += 0xc8000fffU;
+      // Rounding bias part 2.
+      f.u += mantOdd;
+      halfValue = static_cast<uint16_t>(f.u >> kF32HalfMantiBitDiff);
+    }
+  }
+
+  halfValue |= static_cast<uint16_t>(sign >> kF32HalfBitDiff);
+  return halfValue;
+}
+
+// Converts the 16 bit representation of a half precision value to a float
+// value. This implementation is adapted from Eigen.
+float half2float(uint16_t halfValue) {
+  const uint32_t shiftedExp =
+      0x7c00 << kF32HalfMantiBitDiff; // Exponent mask after shift.
+
+  // Initialize the float representation with the exponent/mantissa bits.
+  Float32Bits f = {
+      static_cast<uint32_t>((halfValue & 0x7fff) << kF32HalfMantiBitDiff)};
+  const uint32_t exp = shiftedExp & f.u;
+  f.u += kF32HalfExpAdjust; // Adjust the exponent
+
+  // Handle exponent special cases.
+  if (exp == shiftedExp) {
+    // Inf/NaN
+    f.u += kF32HalfExpAdjust;
+  } else if (exp == 0) {
+    // Zero/Denormal?
+    f.u += 1 << kF32MantiBits;
+    f.f -= kF32Magic.f;
+  }
+
+  f.u |= (halfValue & 0x8000) << kF32HalfBitDiff; // Sign bit.
+  return f.f;
+}
+
+const uint32_t kF32BfMantiBitDiff = 16;
+
+// Constructs the 16 bit representation for a bfloat value from a float value.
+// This implementation is adapted from Eigen.
+uint16_t float2bfloat(float floatValue) {
+  Float32Bits floatBits;
+  floatBits.f = floatValue;
+  uint16_t bfloatBits;
+
+  // Least significant bit of resulting bfloat.
+  uint32_t lsb = (floatBits.u >> kF32BfMantiBitDiff) & 1;
+  uint32_t rounding_bias = 0x7fff + lsb;
+  floatBits.u += rounding_bias;
+  bfloatBits = static_cast<uint16_t>(floatBits.u >> kF32BfMantiBitDiff);
+  return bfloatBits;
+}
+
+// Converts the 16 bit representation of a bfloat value to a float value. This
+// implementation is adapted from Eigen.
+float bfloat2float(uint16_t bfloatBits) {
+  Float32Bits floatBits;
+  floatBits.u = static_cast<uint32_t>(bfloatBits) << kF32BfMantiBitDiff;
+  return floatBits.f;
+}
+
+} // namespace
+
+f16::f16(float f) : bits(float2half(f)) {}
+
+bf16::bf16(float f) : bits(float2bfloat(f)) {}
+
+std::ostream &operator<<(std::ostream &os, const f16 &f) {
+  os << half2float(f.bits);
+  return os;
+}
+
+std::ostream &operator<<(std::ostream &os, const bf16 &d) {
+  os << bfloat2float(d.bits);
+  return os;
+}

diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
index d307e5247ce57..2ec72b0e3172e 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
@@ -1567,6 +1567,16 @@ _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF32, uint8_t,
        uint8_t, float);
 
+  // Two-byte floats with both overheads of the same type.
+  CASE_SECSAME(OverheadType::kU64, PrimaryType::kF16, uint64_t, f16);
+  CASE_SECSAME(OverheadType::kU64, PrimaryType::kBF16, uint64_t, bf16);
+  CASE_SECSAME(OverheadType::kU32, PrimaryType::kF16, uint32_t, f16);
+  CASE_SECSAME(OverheadType::kU32, PrimaryType::kBF16, uint32_t, bf16);
+  CASE_SECSAME(OverheadType::kU16, PrimaryType::kF16, uint16_t, f16);
+  CASE_SECSAME(OverheadType::kU16, PrimaryType::kBF16, uint16_t, bf16);
+  CASE_SECSAME(OverheadType::kU8, PrimaryType::kF16, uint8_t, f16);
+  CASE_SECSAME(OverheadType::kU8, PrimaryType::kBF16, uint8_t, bf16);
+
   // Integral matrices with both overheads of the same type.
   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI64, uint64_t, int64_t);
   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI32, uint64_t, int32_t);

diff --git a/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir b/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
index 6dafc36ab1201..32308d17d3ff5 100644
--- a/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
@@ -28,9 +28,8 @@
 //   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
 //   CHECK-DAG: %[[zeroI32:.*]] = arith.constant 0 : i32
-//   CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
-//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 6 : i32
-//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[zeroI32]], %[[zeroI32]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[ElemTpActionToIter:.*]] = arith.constant 6 : i32
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[zeroI32]], %[[zeroI32]], %[[ElemTpActionToIter]], %[[ElemTpActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
@@ -67,9 +66,8 @@ func.func @sparse_convert_1d(%arg0: tensor<13xi32, #SparseVector>) -> tensor<13x
 //   CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
 //   CHECK-DAG: %[[zeroI32:.*]] = arith.constant 0 : i32
-//   CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
-//   CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 6 : i32
-//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[zeroI32]], %[[zeroI32]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[ElemTpActionToIter:.*]] = arith.constant 6 : i32
+//   CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[zeroI32]], %[[zeroI32]], %[[ElemTpActionToIter]], %[[ElemTpActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
new file mode 100644
index 0000000000000..ec15477bb5800
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir
@@ -0,0 +1,90 @@
+// RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+#DenseVector = #sparse_tensor.encoding<{dimLevelType = ["dense"]}>
+
+#trait_vec_op = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>,  // a (in)
+    affine_map<(i) -> (i)>,  // b (in)
+    affine_map<(i) -> (i)>   // x (out)
+  ],
+  iterator_types = ["parallel"]
+}
+
+module {
+  // Creates a dense vector using the minimum values from two input sparse vectors.
+  // When there is no overlap, include the present value in the output.
+  func.func @vector_min(%arga: tensor<?xf16, #SparseVector>,
+                        %argb: tensor<?xf16, #SparseVector>) -> tensor<?xf16, #DenseVector> {
+    %c = arith.constant 0 : index
+    %d = tensor.dim %arga, %c : tensor<?xf16, #SparseVector>
+    %xv = bufferization.alloc_tensor (%d) : tensor<?xf16, #DenseVector>
+    %0 = linalg.generic #trait_vec_op
+       ins(%arga, %argb: tensor<?xf16, #SparseVector>, tensor<?xf16, #SparseVector>)
+        outs(%xv: tensor<?xf16, #DenseVector>) {
+        ^bb(%a: f16, %b: f16, %x: f16):
+          %1 = sparse_tensor.binary %a, %b : f16, f16 to f16
+            overlap={
+              ^bb0(%a0: f16, %b0: f16):
+                %cmp = arith.cmpf "olt", %a0, %b0 : f16
+                %2 = arith.select %cmp, %a0, %b0: f16
+                sparse_tensor.yield %2 : f16
+            }
+            left=identity
+            right=identity
+          linalg.yield %1 : f16
+    } -> tensor<?xf16, #DenseVector>
+    return %0 : tensor<?xf16, #DenseVector>
+  }
+
+  // Dumps a dense vector of type f16.
+  func.func @dump_vec(%arg0: tensor<?xf16, #DenseVector>) {
+    // Dump the values array to verify only sparse contents are stored.
+    %c0 = arith.constant 0 : index
+    %d0 = arith.constant -1.0 : f16
+    %0 = sparse_tensor.values %arg0 : tensor<?xf16, #DenseVector> to memref<?xf16>
+    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf16>, vector<32xf16>
+    %f1 = arith.extf %1: vector<32xf16> to vector<32xf32>
+    vector.print %f1 : vector<32xf32>
+    return
+  }
+
+  // Driver method to call and verify the kernel.
+  func.func @entry() {
+    %c0 = arith.constant 0 : index
+
+    // Setup sparse vectors.
+    %v1 = arith.constant sparse<
+       [ [0], [3], [11], [17], [20], [21], [28], [29], [31] ],
+         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
+    > : tensor<32xf16>
+    %v2 = arith.constant sparse<
+       [ [1], [3], [4], [10], [16], [18], [21], [28], [29], [31] ],
+         [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0 ]
+    > : tensor<32xf16>
+    %sv1 = sparse_tensor.convert %v1 : tensor<32xf16> to tensor<?xf16, #SparseVector>
+    %sv2 = sparse_tensor.convert %v2 : tensor<32xf16> to tensor<?xf16, #SparseVector>
+
+    // Call the sparse vector kernel.
+    %0 = call @vector_min(%sv1, %sv2)
+       : (tensor<?xf16, #SparseVector>,
+          tensor<?xf16, #SparseVector>) -> tensor<?xf16, #DenseVector>
+
+    //
+    // Verify the result.
+    //
+    // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
+    call @dump_vec(%0) : (tensor<?xf16, #DenseVector>) -> ()
+
+    // Release the resources.
+    sparse_tensor.release %sv1 : tensor<?xf16, #SparseVector>
+    sparse_tensor.release %sv2 : tensor<?xf16, #SparseVector>
+    sparse_tensor.release %0 : tensor<?xf16, #DenseVector>
+    return
+  }
+}

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
new file mode 100644
index 0000000000000..9f107cfeeb72a
--- /dev/null
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
@@ -0,0 +1,78 @@
+ // RUN: mlir-opt %s --sparse-compiler | \
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: FileCheck %s
+
+!Filename = !llvm.ptr<i8>
+
+#SparseMatrix = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed", "compressed" ]
+}>
+
+#trait_sum_reduce = {
+  indexing_maps = [
+    affine_map<(i,j) -> (i,j)>, // A
+    affine_map<(i,j) -> ()>     // x (out)
+  ],
+  iterator_types = ["reduction", "reduction"],
+  doc = "x += A(i,j)"
+}
+
+module {
+  //
+  // A kernel that sum-reduces a matrix to a single scalar.
+  //
+  func.func @kernel_sum_reduce(%arga: tensor<?x?xf16, #SparseMatrix>,
+                          %argx: tensor<f16> {linalg.inplaceable = true}) -> tensor<f16> {
+    %0 = linalg.generic #trait_sum_reduce
+      ins(%arga: tensor<?x?xf16, #SparseMatrix>)
+      outs(%argx: tensor<f16>) {
+      ^bb(%a: f16, %x: f16):
+        %0 = arith.addf %x, %a : f16
+        linalg.yield %0 : f16
+    } -> tensor<f16>
+    return %0 : tensor<f16>
+  }
+
+  func.func private @getTensorFilename(index) -> (!Filename)
+
+  //
+  // Main driver that reads matrix from file and calls the sparse kernel.
+  //
+  func.func @entry() {
+    // Setup input sparse matrix from compressed constant.
+    %d = arith.constant dense <[
+       [ 1.1,  1.2,  0.0,  1.4 ],
+       [ 0.0,  0.0,  0.0,  0.0 ],
+       [ 3.1,  0.0,  3.3,  3.4 ]
+    ]> : tensor<3x4xf16>
+    %a = sparse_tensor.convert %d : tensor<3x4xf16> to tensor<?x?xf16, #SparseMatrix>
+
+    %d0 = arith.constant 0.0 : f16
+    // Setup memory for a single reduction scalar,
+    // initialized to zero.
+    %xdata = memref.alloc() : memref<f16>
+    memref.store %d0, %xdata[] : memref<f16>
+    %x = bufferization.to_tensor %xdata : memref<f16>
+
+    // Call the kernel.
+    %0 = call @kernel_sum_reduce(%a, %x)
+      : (tensor<?x?xf16, #SparseMatrix>, tensor<f16>) -> tensor<f16>
+
+    // Print the result for verification.
+    //
+    // CHECK: 13.5
+    //
+    %m = bufferization.to_memref %0 : memref<f16>
+    %v = memref.load %m[] : memref<f16>
+    %vf = arith.extf %v: f16 to f32
+    vector.print %vf : f32
+
+    // Release the resources.
+    memref.dealloc %xdata : memref<f16>
+    sparse_tensor.release %a : tensor<?x?xf16, #SparseMatrix>
+
+    return
+  }
+}

diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 44aa75beb6481..516b1ec3f9b0d 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -6415,10 +6415,12 @@ cc_library(
     name = "mlir_c_runner_utils",
     srcs = [
         "lib/ExecutionEngine/CRunnerUtils.cpp",
+        "lib/ExecutionEngine/Float16bits.cpp",
         "lib/ExecutionEngine/SparseTensorUtils.cpp",
     ],
     hdrs = [
         "include/mlir/ExecutionEngine/CRunnerUtils.h",
+        "include/mlir/ExecutionEngine/Float16bits.h",
         "include/mlir/ExecutionEngine/Msan.h",
         "include/mlir/ExecutionEngine/SparseTensorUtils.h",
     ],


        


More information about the Mlir-commits mailing list