[Mlir-commits] [mlir] 840e2ba - [mlir][sparse] Use DLT in the mangled function names for insertion.

llvmlistbot at llvm.org
Wed Dec 28 08:21:28 PST 2022


Author: bixia1
Date: 2022-12-28T08:21:22-08:00
New Revision: 840e2ba3368568b84282c97b60beaa4a63df71d3

URL: https://github.com/llvm/llvm-project/commit/840e2ba3368568b84282c97b60beaa4a63df71d3
DIFF: https://github.com/llvm/llvm-project/commit/840e2ba3368568b84282c97b60beaa4a63df71d3.diff

LOG: [mlir][sparse] Use DLT in the mangled function names for insertion.

Mangle the full DimLevelType (DLT) name (e.g. "dense", "compressed-no") into
the generated insertion helpers instead of the previous single-letter C|S|D
codes, so that distinct level-type variants produce distinct helper names.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D140484

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
    mlir/test/Dialect/SparseTensor/codegen.mlir
    mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index 3283a0f88b0c5..9ed209517a111 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -171,25 +171,25 @@ inline std::string toMLIRString(DimLevelType dlt) {
   switch (dlt) {
   // TODO: should probably raise an error instead of printing it...
   case DimLevelType::Undef:
-    return "\"undef\"";
+    return "undef";
   case DimLevelType::Dense:
-    return "\"dense\"";
+    return "dense";
   case DimLevelType::Compressed:
-    return "\"compressed\"";
+    return "compressed";
   case DimLevelType::CompressedNu:
-    return "\"compressed-nu\"";
+    return "compressed-nu";
   case DimLevelType::CompressedNo:
-    return "\"compressed-no\"";
+    return "compressed-no";
   case DimLevelType::CompressedNuNo:
-    return "\"compressed-nu-no\"";
+    return "compressed-nu-no";
   case DimLevelType::Singleton:
-    return "\"singleton\"";
+    return "singleton";
   case DimLevelType::SingletonNu:
-    return "\"singleton-nu\"";
+    return "singleton-nu";
   case DimLevelType::SingletonNo:
-    return "\"singleton-no\"";
+    return "singleton-no";
   case DimLevelType::SingletonNuNo:
-    return "\"singleton-nu-no\"";
+    return "singleton-nu-no";
   }
   return "";
 }

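With the quotes dropped from the returned string, the same helper now serves
both the attribute printer and the function-name mangler. A minimal sketch of
the two uses (assuming the Enums.h declarations above are in scope):

    std::string s = toMLIRString(DimLevelType::CompressedNu); // "compressed-nu"
    printer << "\"" << s << "\"";  // attribute printing re-adds the quotes
    nameOstream << s << "_";       // name mangling uses the bare string
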
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index f28abee047fd5..ccb4a88453e1b 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -174,7 +174,7 @@ void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
   // Print the struct-like storage in dictionary fashion.
   printer << "<{ dimLevelType = [ ";
   for (unsigned i = 0, e = getDimLevelType().size(); i < e; i++) {
-    printer << toMLIRString(getDimLevelType()[i]);
+    printer << "\"" << toMLIRString(getDimLevelType()[i]) << "\"";
     if (i != e - 1)
       printer << ", ";
   }

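Since the printer re-adds the quotes, the textual form of the encoding
attribute is unchanged by this refactoring; for example, the encoding added to
the tests below still prints as:

    #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>
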
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 710d6cec31dd9..e3bcfe8117da2 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -22,6 +22,7 @@
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/SparseTensor/IR/Enums.h"
 #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 #include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
@@ -438,7 +439,7 @@ static void genInsertionCallHelper(OpBuilder &builder,
                                    StringRef namePrefix,
                                    FuncGeneratorType createFunc) {
   // The mangled name of the function has this format:
-  //   <namePrefix>_[C|S|D]_<shape>_<ordering>_<eltType>
+  //   <namePrefix>_<DLT>_<shape>_<ordering>_<eltType>
   //     _<indexBitWidth>_<pointerBitWidth>
   RankedTensorType rtp = desc.getTensorType();
   SmallString<32> nameBuffer;
@@ -447,13 +448,7 @@ static void genInsertionCallHelper(OpBuilder &builder,
   unsigned rank = rtp.getShape().size();
   assert(rank == indices.size());
   for (unsigned d = 0; d < rank; d++) {
-    if (isCompressedDim(rtp, d)) {
-      nameOstream << "C_";
-    } else if (isSingletonDim(rtp, d)) {
-      nameOstream << "S_";
-    } else {
-      nameOstream << "D_";
-    }
+    nameOstream << toMLIRString(getDimLevelType(rtp, d)) << "_";
   }
   // Static dim sizes are used in the generated code while dynamic sizes are
   // loaded from the dimSizes buffer. This is the reason for adding the shape

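For illustration, a standalone C++ sketch of the name format documented in the
hunk above; mangleInsertName is a hypothetical helper (not the in-tree code),
the <ordering> component is omitted (the tests below use default orderings),
and only static dim sizes are shown:

    #include <cstdint>
    #include <sstream>
    #include <string>
    #include <vector>

    static std::string mangleInsertName(const std::vector<std::string> &dlts,
                                        const std::vector<int64_t> &shape,
                                        const std::string &eltType,
                                        unsigned indexBitWidth,
                                        unsigned ptrBitWidth) {
      std::ostringstream os;
      os << "_insert";
      for (const std::string &dlt : dlts) // full DLT names, e.g. "compressed-no"
        os << "_" << dlt;
      for (int64_t size : shape) // static sizes only in this sketch
        os << "_" << size;
      os << "_" << eltType << "_" << indexBitWidth << "_" << ptrBitWidth;
      return os.str();
    }

    // mangleInsertName({"dense", "compressed"}, {8, 8}, "f64", 64, 32)
    //   yields "_insert_dense_compressed_8_8_f64_64_32", matching the CSR
    //   test below; {"dense", "compressed-no"} with widths 0/0 gives the
    //   "_insert_dense_compressed-no_8_8_f64_0_0" name of the UCSR test.
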
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index b60935c2913c5..db7e5065b7484 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --sparse-tensor-codegen  --canonicalize --cse | FileCheck %s
+// RUN: mlir-opt %s --sparse-tensor-codegen  --canonicalize -cse | FileCheck %s
 
 #SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
 
@@ -46,6 +46,10 @@
   dimOrdering = affine_map<(i, j, k) -> (k, i, j)>
 }>
 
+#Coo = #sparse_tensor.encoding<{
+  dimLevelType = [ "compressed-nu", "singleton" ]
+}>
+
 // CHECK-LABEL: func @sparse_nop(
 //  CHECK-SAME: %[[A0:.*]]: memref<?xi32>,
 //  CHECK-SAME: %[[A1:.*]]: memref<?xi64>,
@@ -355,7 +359,7 @@ func.func @sparse_expansion3(%arg0: index, %arg1: index) -> memref<?xindex> {
   return %added : memref<?xindex>
 }
 
-// CHECK-LABEL: func.func private @_insert_C_100_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_compressed_100_f64_0_0(
 //  CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 //  CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 //  CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -380,7 +384,7 @@ func.func @sparse_expansion3(%arg0: index, %arg1: index) -> memref<?xindex> {
 //       CHECK: %[[A12:.*]]:4 = scf.for %[[A13:.*]] = %[[A11]] to %[[A7]] step %[[A10]] iter_args(%[[A14:.*]] = %[[A0]], %[[A15:.*]] = %[[A1]], %[[A16:.*]] = %[[A2]], %[[A17:.*]] = %[[A3]])
 //       CHECK:   %[[A18:.*]] = memref.load %[[A6]]{{\[}}%[[A13]]] : memref<?xindex>
 //       CHECK:   %[[A19:.*]] = memref.load %[[A4]]{{\[}}%[[A18]]] : memref<?xf64>
-//       CHECK:   %[[A20:.*]]:4 = func.call @_insert_C_100_f64_0_0(%[[A14]], %[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A19]])
+//       CHECK:   %[[A20:.*]]:4 = func.call @_insert_compressed_100_f64_0_0(%[[A14]], %[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A19]])
 //       CHECK:   memref.store %[[A9]], %[[A4]]{{\[}}%[[A18]]] : memref<?xf64>
 //       CHECK:   memref.store %[[A8]], %[[A5]]{{\[}}%[[A18]]] : memref<?xi1>
 //       CHECK:   scf.yield %[[A20]]#0, %[[A20]]#1, %[[A20]]#2, %[[A20]]#3
@@ -400,7 +404,7 @@ func.func @sparse_compression_1d(%tensor: tensor<100xf64, #SV>,
   return %1 : tensor<100xf64, #SV>
 }
 
-// CHECK-LABEL: func.func private @_insert_D_C_8_8_f64_64_32(
+// CHECK-LABEL: func.func private @_insert_dense_compressed_8_8_f64_64_32(
 //  CHECK-SAME: %[[A1:.*0]]: memref<?xi32>,
 //  CHECK-SAME: %[[A2:.*1]]: memref<?xi64>,
 //  CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -428,7 +432,7 @@ func.func @sparse_compression_1d(%tensor: tensor<100xf64, #SV>,
 //       CHECK:     %[[A14:.*]]:4 = scf.for %[[A15:.*]] = %[[A13]] to %[[A7]] step %[[A12]] iter_args(%[[A16:.*]] = %[[A0]], %[[A17:.*]] = %[[A1]], %[[A18:.*]] = %[[A2]], %[[A19:.*]] = %[[A3]]) -> (memref<?xi32>, memref<?xi64>, memref<?xf64>, !sparse_tensor.storage_specifier
 //       CHECK:       %[[A20:.*]] = memref.load %[[A6]]{{\[}}%[[A15]]] : memref<?xindex>
 //       CHECK:       %[[A21:.*]] = memref.load %[[A4]]{{\[}}%[[A20]]] : memref<?xf64>
-//       CHECK:       %[[A22:.*]]:4 = func.call @_insert_D_C_8_8_f64_64_32(%[[A16]], %[[A17]], %[[A18]], %[[A19]], %[[A8]], %[[A20]], %[[A21]]) : (memref<?xi32>, memref<?xi64>, memref<?xf64>, !sparse_tensor.storage_specifier
+//       CHECK:       %[[A22:.*]]:4 = func.call @_insert_dense_compressed_8_8_f64_64_32(%[[A16]], %[[A17]], %[[A18]], %[[A19]], %[[A8]], %[[A20]], %[[A21]]) : (memref<?xi32>, memref<?xi64>, memref<?xf64>, !sparse_tensor.storage_specifier
 //       CHECK:       memref.store %[[A11]], %[[A4]]{{\[}}%[[A20]]] : memref<?xf64>
 //       CHECK:       memref.store %[[A10]], %[[A5]]{{\[}}%[[A20]]] : memref<?xi1>
 //       CHECK:       scf.yield %[[A22]]#0, %[[A22]]#1, %[[A22]]#2, %[[A22]]#3 : memref<?xi32>, memref<?xi64>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -461,7 +465,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
   return %1 : tensor<8x8xf64, #CSR>
 }
 
-// CHECK-LABEL: func.func private @_insert_D_C_8_8_f64_0_0(
+// CHECK-LABEL: func.func private @"_insert_dense_compressed-no_8_8_f64_0_0"(
 //  CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 //  CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 //  CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -487,7 +491,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
 //       CHECK:     %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 //       CHECK:       %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref<?xindex>
 //       CHECK:       %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
-//       CHECK:       %[[A21:.*]]:4 = func.call @_insert_D_C_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
+//       CHECK:       %[[A21:.*]]:4 = func.call @"_insert_dense_compressed-no_8_8_f64_0_0"(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 //       CHECK:       memref.store %[[A10]], %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
 //       CHECK:       memref.store %[[A9]], %[[A5]]{{\[}}%[[A19]]] : memref<?xi1>
 //       CHECK:       scf.yield %[[A21]]#0, %[[A21]]#1, %[[A21]]#2, %[[A21]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -520,7 +524,7 @@ func.func @sparse_compression_unordered(%tensor: tensor<8x8xf64, #UCSR>,
   return %1 : tensor<8x8xf64, #UCSR>
 }
 
-// CHECK-LABEL: func.func private @_insert_C_128_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_compressed_128_f64_0_0(
 //  CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 //  CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 //  CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -535,7 +539,7 @@ func.func @sparse_compression_unordered(%tensor: tensor<8x8xf64, #UCSR>,
 //  CHECK-SAME: %[[A4:.*3]]: !sparse_tensor.storage_specifier
 //  CHECK-SAME: %[[A5:.*4]]: index,
 //  CHECK-SAME: %[[A6:.*5]]: f64)
-//       CHECK: %[[R:.*]]:4 = call @_insert_C_128_f64_0_0(%[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]])
+//       CHECK: %[[R:.*]]:4 = call @_insert_compressed_128_f64_0_0(%[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]])
 //       CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
 func.func @sparse_insert(%arg0: tensor<128xf64, #SV>, %arg1: index, %arg2: f64) -> tensor<128xf64, #SV> {
   %0 = sparse_tensor.insert %arg2 into %arg0[%arg1] : tensor<128xf64, #SV>
@@ -543,7 +547,7 @@ func.func @sparse_insert(%arg0: tensor<128xf64, #SV>, %arg1: index, %arg2: f64)
   return %1 : tensor<128xf64, #SV>
 }
 
-// CHECK-LABEL: func.func private @_insert_C_128_f64_64_32(
+// CHECK-LABEL: func.func private @_insert_compressed_128_f64_64_32(
 //  CHECK-SAME: %[[A1:.*]]: memref<?xi32>,
 //  CHECK-SAME: %[[A2:.*]]: memref<?xi64>,
 //  CHECK-SAME: %[[A3:.*]]: memref<?xf64>,
@@ -558,7 +562,7 @@ func.func @sparse_insert(%arg0: tensor<128xf64, #SV>, %arg1: index, %arg2: f64)
 //  CHECK-SAME: %[[A4:.*]]: !sparse_tensor.storage_specifier
 //  CHECK-SAME: %[[A5:.*]]: index,
 //  CHECK-SAME: %[[A6:.*]]: f64)
-//       CHECK: %[[R:.*]]:4 = call @_insert_C_128_f64_64_32(%[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]])
+//       CHECK: %[[R:.*]]:4 = call @_insert_compressed_128_f64_64_32(%[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A6]])
 //       CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
 func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: index, %arg2: f64) -> tensor<128xf64, #SparseVector> {
   %0 = sparse_tensor.insert %arg2 into %arg0[%arg1] : tensor<128xf64, #SparseVector>
@@ -566,6 +570,32 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
   return %1 : tensor<128xf64, #SparseVector>
 }
 
+// CHECK-LABEL: func.func private @"_insert_compressed-nu_singleton_5_6_f64_0_0"(
+//  CHECK-SAME: %[[A0:.*0]]: memref<?xindex>,
+//  CHECK-SAME: %[[A1:.*1]]: memref<?xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xf64>,
+//  CHECK-SAME: %[[A4:.*4]]: !sparse_tensor.storage_specifier
+//  CHECK-SAME: %[[A5:.*5]]: index,
+//  CHECK-SAME: %[[A6:.*6]]: index,
+//  CHECK-SAME: %[[A7:.*7]]: f64)
+//
+// CHECK-LABEL: func.func @sparse_insert_coo(
+//  CHECK-SAME: %[[A0:.*0]]: memref<?xindex>,
+//  CHECK-SAME: %[[A1:.*1]]: memref<?xindex>,
+//  CHECK-SAME: %[[A2:.*2]]: memref<?xindex>,
+//  CHECK-SAME: %[[A3:.*3]]: memref<?xf64>,
+//  CHECK-SAME: %[[A4:.*4]]: !sparse_tensor.storage_specifier
+//  CHECK-SAME: %[[A5:.*5]]: index,
+//  CHECK-SAME: %[[A6:.*6]]: f64)
+//       CHECK: %[[R:.*]]:5 = call @"_insert_compressed-nu_singleton_5_6_f64_0_0"(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A5]], %[[A5]], %[[A6]])
+//       CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3, %[[R]]#4
+func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
+  %0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>
+  %1 = sparse_tensor.load %0 hasInserts : tensor<5x6xf64, #Coo>
+  return %1 : tensor<5x6xf64, #Coo>
+}
+
 // CHECK-LABEL: func.func @sparse_nop_convert(
 //  CHECK-SAME: %[[A1:.*]]: memref<?xi32>,
 //  CHECK-SAME: %[[A2:.*]]: memref<?xi64>,

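Note the quoting in the updated CHECK lines above: DLT names containing '-'
(compressed-nu, compressed-no, ...) yield function names that are not bare
MLIR identifiers, so those symbols must be printed and called in quoted form:

    func.func private @_insert_dense_compressed_8_8_f64_64_32(...)    // bare name
    func.func private @"_insert_dense_compressed-no_8_8_f64_0_0"(...) // quoted
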
diff --git a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
index 90fa3a5509965..90247c33ec529 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir
@@ -12,7 +12,7 @@
 //
 // Computes C = A x B with all matrices sparse (SpMSpM) in CSR.
 //
-// CHECK-LABEL:   func.func private @_insert_D_C_4_4_f64_0_0(
+// CHECK-LABEL:   func.func private @_insert_dense_compressed_4_4_f64_0_0(
 // CHECK-SAME:      %[[VAL_0:.*0]]: memref<?xindex>,
 // CHECK-SAME:      %[[VAL_1:.*1]]: memref<?xindex>,
 // CHECK-SAME:      %[[VAL_2:.*2]]: memref<?xf64>,
@@ -129,7 +129,7 @@
 // CHECK:             %[[VAL_72:.*]]:4 = scf.for %[[VAL_73:.*]] = %[[VAL_11]] to %[[VAL_71]] step %[[VAL_12]] iter_args(%[[VAL_74:.*]] = %[[VAL_42]], %[[VAL_75:.*]] = %[[VAL_43]], %[[VAL_76:.*]] = %[[VAL_44]], %[[VAL_77:.*]] = %[[VAL_45]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK:               %[[VAL_78:.*]] = memref.load %[[VAL_38]]{{\[}}%[[VAL_73]]] : memref<4xindex>
 // CHECK:               %[[VAL_79:.*]] = memref.load %[[VAL_36]]{{\[}}%[[VAL_78]]] : memref<4xf64>
-// CHECK:               %[[VAL_80:.*]]:4 = func.call @_insert_D_C_4_4_f64_0_0(%[[VAL_74]], %[[VAL_75]], %[[VAL_76]], %[[VAL_77]], %[[VAL_41]], %[[VAL_78]], %[[VAL_79]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
+// CHECK:               %[[VAL_80:.*]]:4 = func.call @_insert_dense_compressed_4_4_f64_0_0(%[[VAL_74]], %[[VAL_75]], %[[VAL_76]], %[[VAL_77]], %[[VAL_41]], %[[VAL_78]], %[[VAL_79]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK:               memref.store %[[VAL_10]], %[[VAL_36]]{{\[}}%[[VAL_78]]] : memref<4xf64>
 // CHECK:               memref.store %[[VAL_13]], %[[VAL_37]]{{\[}}%[[VAL_78]]] : memref<4xi1>
 // CHECK:               scf.yield %[[VAL_80]]#0, %[[VAL_80]]#1, %[[VAL_80]]#2, %[[VAL_80]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
