[Mlir-commits] [mlir] [mlir][print] Add functions for printing memref f16/bf16/i16 (PR #75094)

Yinying Li llvmlistbot at llvm.org
Tue Dec 12 15:13:28 PST 2023


https://github.com/yinying-lisa-li updated https://github.com/llvm/llvm-project/pull/75094

>From 3cd37571fcb841e2290b5dffc77dd5db2731ae66 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Mon, 11 Dec 2023 19:46:19 +0000
Subject: [PATCH 1/3] [mlir][sparse]Add functions for printing f16/bf16/i16

---
 mlir/include/mlir/ExecutionEngine/RunnerUtils.h |  8 +++++++-
 mlir/lib/ExecutionEngine/CMakeLists.txt         |  3 +++
 mlir/lib/ExecutionEngine/RunnerUtils.cpp        | 12 ++++++++++++
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
index f998ed53b3403b..2f17e369278118 100644
--- a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
@@ -36,7 +36,7 @@
 #include <complex>
 #include <iomanip>
 #include <iostream>
-
+#include "mlir/ExecutionEngine/Float16bits.h"
 #include "mlir/ExecutionEngine/CRunnerUtils.h"
 
 template <typename T, typename StreamType>
@@ -369,10 +369,16 @@ _mlir_ciface_printMemrefShapeC64(UnrankedMemRefType<impl::complex64> *m);
 extern "C" MLIR_RUNNERUTILS_EXPORT void
 _mlir_ciface_printMemrefI8(UnrankedMemRefType<int8_t> *m);
 extern "C" MLIR_RUNNERUTILS_EXPORT void
+_mlir_ciface_printMemrefI16(UnrankedMemRefType<int16_t> *m);
+extern "C" MLIR_RUNNERUTILS_EXPORT void
 _mlir_ciface_printMemrefI32(UnrankedMemRefType<int32_t> *m);
 extern "C" MLIR_RUNNERUTILS_EXPORT void
 _mlir_ciface_printMemrefI64(UnrankedMemRefType<int64_t> *m);
 extern "C" MLIR_RUNNERUTILS_EXPORT void
+_mlir_ciface_printMemrefF16(UnrankedMemRefType<f16> *m);
+extern "C" MLIR_RUNNERUTILS_EXPORT void
+_mlir_ciface_printMemrefBF16(UnrankedMemRefType<bf16> *m);
+extern "C" MLIR_RUNNERUTILS_EXPORT void
 _mlir_ciface_printMemrefF32(UnrankedMemRefType<float> *m);
 extern "C" MLIR_RUNNERUTILS_EXPORT void
 _mlir_ciface_printMemrefF64(UnrankedMemRefType<double> *m);
diff --git a/mlir/lib/ExecutionEngine/CMakeLists.txt b/mlir/lib/ExecutionEngine/CMakeLists.txt
index 70c5a07ad1ab23..2f391b7698cbb0 100644
--- a/mlir/lib/ExecutionEngine/CMakeLists.txt
+++ b/mlir/lib/ExecutionEngine/CMakeLists.txt
@@ -157,6 +157,9 @@ if(LLVM_ENABLE_PIC)
     RunnerUtils.cpp
 
     EXCLUDE_FROM_LIBMLIR
+
+    LINK_LIBS PUBLIC
+    mlir_float16_utils
   )
   target_compile_definitions(mlir_runner_utils PRIVATE mlir_runner_utils_EXPORTS)
 
diff --git a/mlir/lib/ExecutionEngine/RunnerUtils.cpp b/mlir/lib/ExecutionEngine/RunnerUtils.cpp
index bbfd3a6b11c2a1..378aa7ce35ef16 100644
--- a/mlir/lib/ExecutionEngine/RunnerUtils.cpp
+++ b/mlir/lib/ExecutionEngine/RunnerUtils.cpp
@@ -81,6 +81,10 @@ extern "C" void _mlir_ciface_printMemrefI8(UnrankedMemRefType<int8_t> *M) {
   impl::printMemRef(*M);
 }
 
+extern "C" void _mlir_ciface_printMemrefI16(UnrankedMemRefType<int16_t> *M) {
+  impl::printMemRef(*M);
+}
+
 extern "C" void _mlir_ciface_printMemrefI32(UnrankedMemRefType<int32_t> *M) {
   impl::printMemRef(*M);
 }
@@ -89,6 +93,14 @@ extern "C" void _mlir_ciface_printMemrefI64(UnrankedMemRefType<int64_t> *M) {
   impl::printMemRef(*M);
 }
 
+extern "C" void _mlir_ciface_printMemrefF16(UnrankedMemRefType<f16> *M) {
+  impl::printMemRef(*M);
+}
+
+extern "C" void _mlir_ciface_printMemrefBF16(UnrankedMemRefType<bf16> *M) {
+  impl::printMemRef(*M);
+}
+
 extern "C" void _mlir_ciface_printMemrefF32(UnrankedMemRefType<float> *M) {
   impl::printMemRef(*M);
 }

>From 9626fc6d1966aa2a2330841a916ee17e8b3be021 Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Mon, 11 Dec 2023 20:25:23 +0000
Subject: [PATCH 2/3] fix format

---
 mlir/include/mlir/ExecutionEngine/RunnerUtils.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
index 2f17e369278118..ffa506bd0b0416 100644
--- a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
@@ -31,13 +31,13 @@
 #define MLIR_RUNNERUTILS_EXPORT __attribute__((visibility("default")))
 #endif // _WIN32
 
+#include "mlir/ExecutionEngine/CRunnerUtils.h"
+#include "mlir/ExecutionEngine/Float16bits.h"
 #include <assert.h>
 #include <cmath>
 #include <complex>
 #include <iomanip>
 #include <iostream>
-#include "mlir/ExecutionEngine/Float16bits.h"
-#include "mlir/ExecutionEngine/CRunnerUtils.h"
 
 template <typename T, typename StreamType>
 void printMemRefMetaData(StreamType &os, const DynamicMemRefType<T> &v) {

>From d5b102d334a03528f7756a721465a6f1d1f43ffc Mon Sep 17 00:00:00 2001
From: Yinying Li <yinyingli at google.com>
Date: Tue, 12 Dec 2023 23:05:01 +0000
Subject: [PATCH 3/3] add new test

---
 .../Dialect/Memref/print-memref.mlir          | 129 ++++++++++++++++++
 1 file changed, 129 insertions(+)
 create mode 100644 mlir/test/Integration/Dialect/Memref/print-memref.mlir

diff --git a/mlir/test/Integration/Dialect/Memref/print-memref.mlir b/mlir/test/Integration/Dialect/Memref/print-memref.mlir
new file mode 100644
index 00000000000000..78f9a85c21ecf4
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Memref/print-memref.mlir
@@ -0,0 +1,129 @@
+// RUN: mlir-opt %s \
+// RUN:   -func-bufferize -arith-bufferize --canonicalize \
+// RUN:   -finalize-memref-to-llvm\
+// RUN:   -convert-func-to-llvm -reconcile-unrealized-casts |\
+// RUN: mlir-cpu-runner \
+// RUN:  -e entry -entry-point-result=void  \
+// RUN:  -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils |\
+// RUN: FileCheck %s
+
+module {
+
+  func.func private @printMemrefI8(%ptr : tensor<*xi8>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefI16(%ptr : tensor<*xi16>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefI32(%ptr : tensor<*xi32>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefI64(%ptr : tensor<*xi64>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefBF16(%ptr : tensor<*xbf16>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefF16(%ptr : tensor<*xf16>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefF32(%ptr : tensor<*xf32>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefF64(%ptr : tensor<*xf64>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefC32(%ptr : tensor<*xcomplex<f32>>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefC64(%ptr : tensor<*xcomplex<f64>>) attributes { llvm.emit_c_interface }
+  func.func private @printMemrefInd(%ptr : tensor<*xindex>) attributes { llvm.emit_c_interface }
+
+  func.func @entry() {
+    %i8 = arith.constant dense<90> : tensor<3x3xi8>
+    %i16 = arith.constant dense<1> : tensor<3x3xi16>
+    %i32 = arith.constant dense<2> : tensor<3x3xi32>
+    %i64 = arith.constant dense<3> : tensor<3x3xi64>
+    %f16 = arith.constant dense<1.5> : tensor<3x3xf16>
+    %bf16 = arith.constant dense<2.5> : tensor<3x3xbf16>
+    %f32 = arith.constant dense<3.5> : tensor<3x3xf32>
+    %f64 = arith.constant dense<4.5> : tensor<3x3xf64>
+    %c32 = arith.constant dense<(1.000000e+01,5.000000e+00)> : tensor<3x3xcomplex<f32>>
+    %c64 = arith.constant dense<(2.000000e+01,5.000000e+00)> : tensor<3x3xcomplex<f64>>
+    %ind = arith.constant dense<4> : tensor<3x3xindex>
+
+    %1 = tensor.cast %i8 : tensor<3x3xi8> to tensor<*xi8>
+    %2 = tensor.cast %i16 : tensor<3x3xi16> to tensor<*xi16>
+    %3 = tensor.cast %i32 : tensor<3x3xi32> to tensor<*xi32>
+    %4 = tensor.cast %i64 : tensor<3x3xi64> to tensor<*xi64>
+    %5 = tensor.cast %f16 : tensor<3x3xf16> to tensor<*xf16>
+    %6 = tensor.cast %bf16 : tensor<3x3xbf16> to tensor<*xbf16>
+    %7 = tensor.cast %f32 : tensor<3x3xf32> to tensor<*xf32>
+    %8 = tensor.cast %f64 : tensor<3x3xf64> to tensor<*xf64>
+    %9 = tensor.cast %c32 : tensor<3x3xcomplex<f32>> to tensor<*xcomplex<f32>>
+    %10 = tensor.cast %c64 : tensor<3x3xcomplex<f64>> to tensor<*xcomplex<f64>>
+    %11 = tensor.cast %ind : tensor<3x3xindex> to tensor<*xindex>
+
+    // CHECK:      data = 
+    // CHECK-NEXT: {{\[}}[Z,   Z,   Z],
+    // CHECK-NEXT:       [Z,   Z,   Z],
+    // CHECK-NEXT:       [Z,   Z,   Z]]
+    //
+    call @printMemrefI8(%1) : (tensor<*xi8>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[1,   1,   1],
+    // CHECK-NEXT:       [1,   1,   1],
+    // CHECK-NEXT:       [1,   1,   1]]
+    //
+    call @printMemrefI16(%2) : (tensor<*xi16>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[2,   2,   2],
+    // CHECK-NEXT:       [2,   2,   2],
+    // CHECK-NEXT:       [2,   2,   2]]
+    //
+    call @printMemrefI32(%3) : (tensor<*xi32>) -> ()
+
+    // CHECK-NEXT: data =
+    // CHECK-NEXT: {{\[}}[3,   3,   3],
+    // CHECK-NEXT:       [3,   3,   3],
+    // CHECK-NEXT:       [3,   3,   3]]
+    //
+    call @printMemrefI64(%4) : (tensor<*xi64>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[1.5,   1.5,   1.5],
+    // CHECK-NEXT:       [1.5,   1.5,   1.5],
+    // CHECK-NEXT:       [1.5,   1.5,   1.5]]
+    //
+    call @printMemrefF16(%5) : (tensor<*xf16>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[2.5,   2.5,   2.5],
+    // CHECK-NEXT:       [2.5,   2.5,   2.5],
+    // CHECK-NEXT:       [2.5,   2.5,   2.5]]
+    //
+    call @printMemrefBF16(%6) : (tensor<*xbf16>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[3.5,   3.5,   3.5],
+    // CHECK-NEXT:       [3.5,   3.5,   3.5],
+    // CHECK-NEXT:       [3.5,   3.5,   3.5]]
+    //
+    call @printMemrefF32(%7) : (tensor<*xf32>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[4.5,   4.5,   4.5],
+    // CHECK-NEXT:       [4.5,   4.5,   4.5],
+    // CHECK-NEXT:       [4.5,   4.5,   4.5]]
+    //
+    call @printMemrefF64(%8) : (tensor<*xf64>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[(10,5), (10,5), (10,5)],
+    // CHECK-NEXT:       [(10,5), (10,5), (10,5)],
+    // CHECK-NEXT:       [(10,5), (10,5), (10,5)]]
+    //
+    call @printMemrefC32(%9) : (tensor<*xcomplex<f32>>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[(20,5), (20,5), (20,5)],
+    // CHECK-NEXT:       [(20,5), (20,5), (20,5)],
+    // CHECK-NEXT:       [(20,5), (20,5), (20,5)]]
+    //
+    call @printMemrefC64(%10) : (tensor<*xcomplex<f64>>) -> ()
+
+    // CHECK-NEXT: data = 
+    // CHECK-NEXT: {{\[}}[4,   4,   4],
+    // CHECK-NEXT:       [4,   4,   4],
+    // CHECK-NEXT:       [4,   4,   4]]
+    //
+    call @printMemrefInd(%11) : (tensor<*xindex>) -> ()
+
+    return
+
+  }
+}
\ No newline at end of file
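The integration test above drives the new entry points through the MLIR C interface (llvm.emit_c_interface). For reference, an equivalent direct call from C++ would build a ranked descriptor, wrap it in the rank-erased form, and pass it to the new symbol. This is only an illustrative sketch; it assumes the StridedMemRefType/UnrankedMemRefType layouts from CRunnerUtils.h and the f16 float constructor from Float16bits.h:

  #include "mlir/ExecutionEngine/RunnerUtils.h"

  int main() {
    // A 2x2 buffer of half-precision values, all 1.5, in row-major order.
    f16 data[4] = {f16(1.5f), f16(1.5f), f16(1.5f), f16(1.5f)};

    // Rank-2 descriptor: base/data pointers, offset, sizes, strides.
    StridedMemRefType<f16, 2> ranked;
    ranked.basePtr = data;
    ranked.data = data;
    ranked.offset = 0;
    ranked.sizes[0] = 2;
    ranked.sizes[1] = 2;
    ranked.strides[0] = 2;
    ranked.strides[1] = 1;

    // Rank-erased descriptor expected by the _mlir_ciface_* entry points.
    UnrankedMemRefType<f16> unranked{2, static_cast<void *>(&ranked)};

    // One of the entry points added by this PR.
    _mlir_ciface_printMemrefF16(&unranked);
    return 0;
  }

In the JIT-compiled test, the same descriptor packing is produced automatically when the func-to-llvm lowering emits the _mlir_ciface_* wrappers for the declared print functions.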


