[Mlir-commits] [mlir] 387755a - [mlir][sparse] Simplifying SparseTensorEncodingAttr function arguments

wren romano llvmlistbot at llvm.org
Mon Dec 12 17:06:04 PST 2022


Author: wren romano
Date: 2022-12-12T17:05:56-08:00
New Revision: 387755a35d1260e65b4ed6811b492e021a05fe52

URL: https://github.com/llvm/llvm-project/commit/387755a35d1260e65b4ed6811b492e021a05fe52
DIFF: https://github.com/llvm/llvm-project/commit/387755a35d1260e65b4ed6811b492e021a05fe52.diff

LOG: [mlir][sparse] Simplifying SparseTensorEncodingAttr function arguments

Since STEA (SparseTensorEncodingAttr) is an Attribute, and an Attribute is just (a wrapper around) a pointer, the extra `const` and `&` aren't necessary for function arguments; a short pass-by-value sketch follows the commit metadata below.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D139886
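
[Editorial note, not part of the commit.] A minimal sketch of why pass-by-value is the natural signature here: MLIR attribute classes such as SparseTensorEncodingAttr are immutable value types that wrap a uniqued storage pointer, so copying one copies only that pointer, and a null encoding (the dense case) converts to `false` in a boolean context, exactly like the `if (enc)` guards in the diff below. The helper name `getNumDimLevelTypes` is hypothetical; `getSparseTensorEncoding` and `getDimLevelType` are existing accessors already used in this diff.

    // Minimal sketch (not from the commit): pass the encoding by value.
    #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"

    using mlir::sparse_tensor::SparseTensorEncodingAttr;
    using mlir::sparse_tensor::getSparseTensorEncoding;

    // Copying `enc` copies only the underlying storage pointer, and a null
    // (dense) encoding is handled with a plain boolean test, so neither
    // `const` nor `&` buys anything on the parameter.
    static size_t getNumDimLevelTypes(SparseTensorEncodingAttr enc) {
      return enc ? enc.getDimLevelType().size() : 0;
    }

    // Usage sketch: `tensorType` is any mlir::Type; getSparseTensorEncoding
    // returns a null encoding for non-sparse types, which the helper above
    // tolerates:
    //   size_t n = getNumDimLevelTypes(getSparseTensorEncoding(tensorType));

A pointer-sized value type passed by value can also travel in a register, whereas `const &` forces the argument to be addressable at the call site.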

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
    mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
    mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
    mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
index e9b63a6f6da1b..c1bd7b40d6548 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
@@ -46,8 +46,7 @@ bool isUniqueCOOType(RankedTensorType tp);
 // And therefore all functions calling it cannot be constexpr either.
 // TODO: since Clang does allow these to be constexpr, perhaps we should
 // define a macro to abstract over `inline` vs `constexpr` annotations.
-inline DimLevelType getDimLevelType(const SparseTensorEncodingAttr &enc,
-                                    uint64_t d) {
+inline DimLevelType getDimLevelType(SparseTensorEncodingAttr enc, uint64_t d) {
   if (enc) {
     auto types = enc.getDimLevelType();
     assert(d < types.size() && "Dimension out of bounds");
@@ -110,8 +109,8 @@ inline bool isUniqueDim(RankedTensorType type, uint64_t d) {
 // Reordering.
 //
 
-uint64_t toOrigDim(const SparseTensorEncodingAttr &enc, uint64_t d);
-uint64_t toStoredDim(const SparseTensorEncodingAttr &enc, uint64_t d);
+uint64_t toOrigDim(SparseTensorEncodingAttr enc, uint64_t d);
+uint64_t toStoredDim(SparseTensorEncodingAttr enc, uint64_t d);
 
 /// Convenience method to translate the given stored dimension
 /// to the original dimension (0 <= d < rank).

diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 44e52207537fe..55b541446ebdc 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -294,7 +294,7 @@ bool mlir::sparse_tensor::isUniqueCOOType(RankedTensorType tp) {
   return isUniqueDim(tp, tp.getRank() - 1);
 }
 
-uint64_t mlir::sparse_tensor::toOrigDim(const SparseTensorEncodingAttr &enc,
+uint64_t mlir::sparse_tensor::toOrigDim(SparseTensorEncodingAttr enc,
                                         uint64_t d) {
   if (enc) {
     auto order = enc.getDimOrdering();
@@ -306,7 +306,7 @@ uint64_t mlir::sparse_tensor::toOrigDim(const SparseTensorEncodingAttr &enc,
   return d;
 }
 
-uint64_t mlir::sparse_tensor::toStoredDim(const SparseTensorEncodingAttr &enc,
+uint64_t mlir::sparse_tensor::toStoredDim(SparseTensorEncodingAttr enc,
                                           uint64_t d) {
   if (enc) {
     auto order = enc.getDimOrdering();

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 6052fceeb397d..ede9c56b7b707 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -880,23 +880,23 @@ Type mlir::sparse_tensor::getOverheadType(Builder &builder, OverheadType ot) {
   llvm_unreachable("Unknown OverheadType");
 }
 
-OverheadType mlir::sparse_tensor::pointerOverheadTypeEncoding(
-    const SparseTensorEncodingAttr &enc) {
+OverheadType
+mlir::sparse_tensor::pointerOverheadTypeEncoding(SparseTensorEncodingAttr enc) {
   return overheadTypeEncoding(enc.getPointerBitWidth());
 }
 
-OverheadType mlir::sparse_tensor::indexOverheadTypeEncoding(
-    const SparseTensorEncodingAttr &enc) {
+OverheadType
+mlir::sparse_tensor::indexOverheadTypeEncoding(SparseTensorEncodingAttr enc) {
   return overheadTypeEncoding(enc.getIndexBitWidth());
 }
 
-Type mlir::sparse_tensor::getPointerOverheadType(
-    Builder &builder, const SparseTensorEncodingAttr &enc) {
+Type mlir::sparse_tensor::getPointerOverheadType(Builder &builder,
+                                                 SparseTensorEncodingAttr enc) {
   return getOverheadType(builder, pointerOverheadTypeEncoding(enc));
 }
 
-Type mlir::sparse_tensor::getIndexOverheadType(
-    Builder &builder, const SparseTensorEncodingAttr &enc) {
+Type mlir::sparse_tensor::getIndexOverheadType(Builder &builder,
+                                               SparseTensorEncodingAttr enc) {
   return getOverheadType(builder, indexOverheadTypeEncoding(enc));
 }
 

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index e80846648254b..4d5805e6de93e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -48,18 +48,16 @@ OverheadType overheadTypeEncoding(Type tp);
 Type getOverheadType(Builder &builder, OverheadType ot);
 
 /// Returns the OverheadType for pointer overhead storage.
-OverheadType pointerOverheadTypeEncoding(const SparseTensorEncodingAttr &enc);
+OverheadType pointerOverheadTypeEncoding(SparseTensorEncodingAttr enc);
 
 /// Returns the OverheadType for index overhead storage.
-OverheadType indexOverheadTypeEncoding(const SparseTensorEncodingAttr &enc);
+OverheadType indexOverheadTypeEncoding(SparseTensorEncodingAttr enc);
 
 /// Returns the mlir::Type for pointer overhead storage.
-Type getPointerOverheadType(Builder &builder,
-                            const SparseTensorEncodingAttr &enc);
+Type getPointerOverheadType(Builder &builder, SparseTensorEncodingAttr enc);
 
 /// Returns the mlir::Type for index overhead storage.
-Type getIndexOverheadType(Builder &builder,
-                          const SparseTensorEncodingAttr &enc);
+Type getIndexOverheadType(Builder &builder, SparseTensorEncodingAttr enc);
 
 /// Convert OverheadType to its function-name suffix.
 StringRef overheadTypeFunctionSuffix(OverheadType ot);
@@ -281,14 +279,14 @@ inline Value constantOverheadTypeEncoding(OpBuilder &builder, Location loc,
 /// Generates a constant of the internal type-encoding for pointer
 /// overhead storage.
 inline Value constantPointerTypeEncoding(OpBuilder &builder, Location loc,
-                                         const SparseTensorEncodingAttr &enc) {
+                                         SparseTensorEncodingAttr enc) {
   return constantOverheadTypeEncoding(builder, loc, enc.getPointerBitWidth());
 }
 
 /// Generates a constant of the internal type-encoding for index overhead
 /// storage.
 inline Value constantIndexTypeEncoding(OpBuilder &builder, Location loc,
-                                       const SparseTensorEncodingAttr &enc) {
+                                       SparseTensorEncodingAttr enc) {
   return constantOverheadTypeEncoding(builder, loc, enc.getIndexBitWidth());
 }
 

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index ba3c94c1330f9..2603b8d6b1ef9 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -84,7 +84,7 @@ static Value genDimSizeCall(OpBuilder &builder, Location loc, Value tensor,
 /// Looks up a level-size by returning a statically-computed constant
 /// (when possible), or by calling `genLvlSizeCall` (when dynamic).
 static Value createOrFoldLvlCall(OpBuilder &builder, Location loc,
-                                 SparseTensorEncodingAttr &enc, ShapedType stp,
+                                 SparseTensorEncodingAttr enc, ShapedType stp,
                                  Value tensor, unsigned lvl) {
   // Only sparse tensors have "levels" to query.
   assert(enc);
@@ -111,7 +111,7 @@ static Value createOrFoldLvlCall(OpBuilder &builder, Location loc,
 /// of sparse tensors) or `linalg::createOrFoldDimOp` (for dynamic sizes
 /// of dense tensors).
 static Value createOrFoldDimCall(OpBuilder &builder, Location loc,
-                                 SparseTensorEncodingAttr &enc, ShapedType stp,
+                                 SparseTensorEncodingAttr enc, ShapedType stp,
                                  Value tensor, unsigned dim) {
   auto s = stp.getShape()[dim];
   if (s != ShapedType::kDynamic)
@@ -123,7 +123,7 @@ static Value createOrFoldDimCall(OpBuilder &builder, Location loc,
 
 /// Populates the array with the dimension-sizes of the given tensor.
 static void fillDimSizes(OpBuilder &builder, Location loc,
-                         SparseTensorEncodingAttr &enc, ShapedType stp,
+                         SparseTensorEncodingAttr enc, ShapedType stp,
                          Value tensor, SmallVectorImpl<Value> &out) {
   unsigned dimRank = stp.getRank();
   out.reserve(dimRank);
@@ -133,7 +133,7 @@ static void fillDimSizes(OpBuilder &builder, Location loc,
 
 /// Returns an array with the dimension-sizes of the given tensor.
 static SmallVector<Value> getDimSizes(OpBuilder &builder, Location loc,
-                                      SparseTensorEncodingAttr &enc,
+                                      SparseTensorEncodingAttr enc,
                                       ShapedType stp, Value tensor) {
   SmallVector<Value> out;
   fillDimSizes(builder, loc, enc, stp, tensor, out);
