[Mlir-commits] [mlir] b364c76 - [mlir][sparse] Using non-empty function name suffix for OverheadType::kIndex
Author: wren romano
Date: 2022-06-01T14:18:42-07:00
New Revision: b364c76683f8ef241025a9556300778c07b590c2
URL: https://github.com/llvm/llvm-project/commit/b364c76683f8ef241025a9556300778c07b590c2
DIFF: https://github.com/llvm/llvm-project/commit/b364c76683f8ef241025a9556300778c07b590c2.diff
LOG: [mlir][sparse] Using non-empty function name suffix for OverheadType::kIndex
The trick of using an empty token in the `FOREVERY_O` x-macro relies on preprocessor behavior that is only standard since C99 6.10.3/4 and C++11 N3290 16.3/4 (whereas it was undefined behavior up through C++03 16.3/10). Since the `ExecutionEngine/SparseTensorUtils.cpp` file is required to be compilable under C++98 compatibility mode (unlike the C++11 used elsewhere in MLIR), we shouldn't rely on that behavior.
Also, using a non-empty suffix helps improve uniformity of the API, since all other primary/overhead suffixes are also non-empty. I'm using the suffix `0` since that's the value used by the `SparseTensorEncoding` attribute for indicating the index overhead-type.
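For context, here is a minimal self-contained sketch (not the actual `SparseTensorUtils.h` code) of how the x-macro drives name generation and why the old empty suffix was fragile. The contents of `FOREVERY_FIXED_O` and the `DECL_SPARSEPOINTERS` macro below are simplified illustrations with made-up signatures, not the real definitions:

  #include <cstdint>
  using index_type = uint64_t;

  // Simplified stand-in for the fixed-width overhead x-macro.
  #define FOREVERY_FIXED_O(DO)                                               \
    DO(64, uint64_t)                                                         \
    DO(32, uint32_t)                                                         \
    DO(16, uint16_t)                                                         \
    DO(8, uint8_t)

  // After this change every invocation passes a non-empty suffix, so the
  // token paste below is well-defined even in C++98 compatibility mode.
  // Previously this line was `DO(, index_type)`, and concatenating the
  // resulting empty token was undefined behavior before C99/C++11.
  #define FOREVERY_O(DO)                                                     \
    FOREVERY_FIXED_O(DO)                                                     \
    DO(0, index_type)

  // Hypothetical declaring macro: pastes the suffix onto a base name.
  // The signature is purely illustrative.
  #define DECL_SPARSEPOINTERS(ONAME, O)                                      \
    O *sparsePointers##ONAME(void *tensor, uint64_t dim);

  FOREVERY_O(DECL_SPARSEPOINTERS)
  // Declares: sparsePointers64, sparsePointers32, sparsePointers16,
  //           sparsePointers8, and now sparsePointers0 (was sparsePointers).
  #undef DECL_SPARSEPOINTERS

  int main() { return 0; }

The same suffix is what `overheadTypeFunctionSuffix` returns on the compiler side, which is why the runtime-call names in the test updates below change from `sparsePointers`/`sparseIndices` to `sparsePointers0`/`sparseIndices0`.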
Depends On D126720
Reviewed By: aartbik
Differential Revision: https://reviews.llvm.org/D126724
Added:
Modified:
mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
mlir/test/Dialect/SparseTensor/conversion.mlir
mlir/test/Dialect/SparseTensor/sparse_lower.mlir
mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
index 51e78c3f27fc2..0b51bc2a354f2 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h
@@ -62,14 +62,10 @@ enum class OverheadType : uint32_t {
DO(8, uint8_t)
// This x-macro calls its argument on every overhead type, including
-// `index_type`. Our naming convention uses an empty suffix for
-// `index_type`, so the missing first argument when we call `DO`
-// gets resolved to the empty token which can then be concatenated
-// as intended. (This behavior is standard per C99 6.10.3/4 and
-// C++11 N3290 16.3/4; whereas in C++03 16.3/10 it was undefined behavior.)
+// `index_type`.
#define FOREVERY_O(DO) \
FOREVERY_FIXED_O(DO) \
- DO(, index_type)
+ DO(0, index_type)
// These are not just shorthands but indicate the particular
// implementation used (e.g., as opposed to C99's `complex double`,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 8bd2aceadaa95..40d4b756a10b8 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -78,14 +78,14 @@ Type mlir::sparse_tensor::getIndexOverheadType(
return getOverheadType(builder, indexOverheadTypeEncoding(enc));
}
-// TODO: Adjust the naming convention for the constructors of `OverheadType`
-// and the function-suffix for `kIndex` so we can use the `FOREVERY_O`
-// x-macro here instead of `FOREVERY_FIXED_O`; to further reduce the
-// possibility of typo bugs or things getting out of sync.
+// TODO: Adjust the naming convention for the constructors of
+// `OverheadType` so we can use the `FOREVERY_O` x-macro here instead
+// of `FOREVERY_FIXED_O`; to further reduce the possibility of typo bugs
+// or things getting out of sync.
StringRef mlir::sparse_tensor::overheadTypeFunctionSuffix(OverheadType ot) {
switch (ot) {
case OverheadType::kIndex:
- return "";
+ return "0";
#define CASE(ONAME, O) \
case OverheadType::kU##ONAME: \
return #ONAME;
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index a1838141cc0ef..64d204601686e 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -365,7 +365,7 @@ func.func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #Spa
// CHECK-LABEL: func @sparse_pointers(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
-// CHECK: %[[T:.*]] = call @sparsePointers(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK: %[[T:.*]] = call @sparsePointers0(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_pointers(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 0 : index
@@ -398,7 +398,7 @@ func.func @sparse_pointers32(%arg0: tensor<128xf64, #SparseVector32>) -> memref<
// CHECK-LABEL: func @sparse_indices(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[C:.*]] = arith.constant 0 : index
-// CHECK: %[[T:.*]] = call @sparseIndices(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK: %[[T:.*]] = call @sparseIndices0(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func.func @sparse_indices(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
%c = arith.constant 0 : index
diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
index 6b3d6448397ef..f20fcbe3c19db 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir
@@ -60,8 +60,8 @@
// CHECK-MIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-MIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-MIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
-// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
@@ -93,8 +93,8 @@
// CHECK-LIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-LIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-LIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-LIR-DAG: %[[VAL_6:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
-// CHECK-LIR-DAG: %[[VAL_7:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-LIR-DAG: %[[VAL_6:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-LIR-DAG: %[[VAL_7:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-LIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-LIR-DAG: %[[VAL_9:.*]] = memref.alloc() : memref<32xf64>
// CHECK-LIR: memref.copy %[[VAL_2]], %[[VAL_9]] : memref<32xf64> to memref<32xf64>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
index 1b8891fd529b7..ed737f7f28e51 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir
@@ -62,8 +62,8 @@
// CHECK-MIR-DAG: %[[VAL_3:.*]] = arith.constant 64 : index
// CHECK-MIR-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-MIR-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
-// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
-// CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR-DAG: %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
@@ -94,8 +94,8 @@
// CHECK-LIR-DAG: %[[VAL_3:.*]] = arith.constant 64 : index
// CHECK-LIR-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-LIR-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
-// CHECK-LIR-DAG: %[[VAL_7:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
-// CHECK-LIR-DAG: %[[VAL_8:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-LIR-DAG: %[[VAL_7:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-LIR-DAG: %[[VAL_8:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-LIR-DAG: %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-LIR-DAG: %[[VAL_10:.*]] = memref.alloc() : memref<32xf64>
// CHECK-LIR: memref.copy %[[VAL_2]], %[[VAL_10]] : memref<32xf64> to memref<32xf64>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
index 1b8b0ee7c0324..b3e0d25a7548d 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir
@@ -58,8 +58,8 @@
// CHECK-MIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-MIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-MIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-MIR: %[[VAL_6:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
-// CHECK-MIR: %[[VAL_7:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-MIR: %[[VAL_6:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-MIR: %[[VAL_7:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-MIR: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
@@ -89,8 +89,8 @@
// CHECK-LIR-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-LIR-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-LIR-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-LIR: %[[VAL_6:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
-// CHECK-LIR: %[[VAL_7:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-LIR: %[[VAL_6:.*]] = call @sparsePointers0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
+// CHECK-LIR: %[[VAL_7:.*]] = call @sparseIndices0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-LIR: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-LIR: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-LIR-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<?xindex>