[Mlir-commits] [mlir] Bok (PR #72466)

Aart Bik llvmlistbot at llvm.org
Wed Nov 15 18:56:21 PST 2023


https://github.com/aartbik created https://github.com/llvm/llvm-project/pull/72466

None

From 8c51a66dd19f9d1ddb6ef3a390cf888e58923780 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Wed, 15 Nov 2023 12:53:45 -0800
Subject: [PATCH 1/2] [mlir][sparse] fix broken test (merge conflict marker was
 left)

---
 mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir | 1 -
 1 file changed, 1 deletion(-)

diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index de05320fbadb89f..2b9b73a1990e65f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -17,7 +17,6 @@
 // DEFINE: %{env} =
 //--------------------------------------------------------------------------------------------------
 
-<<<<<<< HEAD
 // RUN: %{compile} | %{run} | FileCheck %s
 //
 // Do the same run, but now with direct IR generation.

From 59aa72a239a65fad04c4eac9776f44c6c8501be9 Mon Sep 17 00:00:00 2001
From: Aart Bik <ajcbik at google.com>
Date: Wed, 15 Nov 2023 18:48:38 -0800
Subject: [PATCH 2/2] [mlir][sparse] refactor dim2lvl/lvl2dim lvlsizes setup

This change provides access to the individual components (SSA values)
of the dim sizes and lvl sizes after each CodegenUtils call.

This is step 2 out of 3 in making sparse_tensor.new work for BSR.
---
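A minimal sketch of the updated call shape, mirroring the new call sites
in SparseTensorConversion.cpp and assuming the usual OpConversionPattern
context (rewriter, loc, stt, adaptor); all other names are illustrative
only:

  SmallVector<Value> dimSizesValues;
  Value dimSizesBuffer;
  // genReader now returns the actual dim sizes as SSA values; dynamic
  // sizes are loaded from the reader's dim-sizes buffer.
  Value reader = genReader(rewriter, loc, stt, adaptor.getOperands()[0],
                           dimSizesValues, dimSizesBuffer);
  SmallVector<Value> lvlSizesValues; // newly exposed by this change
  Value dim2lvlBuffer;
  Value lvl2dimBuffer;
  Value lvlSizesBuffer =
      genMapBuffers(rewriter, loc, stt, dimSizesValues, dimSizesBuffer,
                    lvlSizesValues, dim2lvlBuffer, lvl2dimBuffer);
  // Each lvlSizesValues[l] is the SSA size of level l, usable by later
  // codegen steps without reloading from lvlSizesBuffer.
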
 .../SparseTensor/Transforms/CodegenUtils.cpp  | 90 ++++++++++---------
 .../SparseTensor/Transforms/CodegenUtils.h    | 10 +--
 .../Transforms/SparseTensorCodegen.cpp        |  5 +-
 .../Transforms/SparseTensorConversion.cpp     | 41 ++++-----
 4 files changed, 73 insertions(+), 73 deletions(-)

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 5c1d4437265cc93..11f7166127f2b82 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -639,25 +639,20 @@ Value sparse_tensor::createOrFoldSliceStrideOp(OpBuilder &builder, Location loc,
   return builder.create<ToSliceStrideOp>(loc, tensor, APInt(64, dim));
 }
 
-void sparse_tensor::fillDimShape(OpBuilder &builder, Location loc,
-                                 SparseTensorType stt,
-                                 SmallVectorImpl<Value> &out) {
-  out.clear();
-  out.reserve(stt.getDimRank());
-  for (const Size sz : stt.getDimShape()) {
-    const auto s = ShapedType::isDynamic(sz) ? 0 : sz;
-    out.push_back(constantIndex(builder, loc, s));
-  }
-}
-
 Value sparse_tensor::genReader(OpBuilder &builder, Location loc,
                                SparseTensorType stt, Value tensor,
-                               /*out*/ SmallVectorImpl<Value> &dimShapesValues,
+                               /*out*/ SmallVectorImpl<Value> &dimSizesValues,
                                /*out*/ Value &dimSizesBuffer) {
-  // Construct the dimShapes buffer. The buffer contains the static size
-  // per dimension, or otherwise a zero for a dynamic size.
-  fillDimShape(builder, loc, stt, dimShapesValues);
-  Value dimShapesBuffer = allocaBuffer(builder, loc, dimShapesValues);
+  // Construct the dimension **shapes** buffer. The buffer contains the static
+  // size per dimension, or otherwise a zero for a dynamic size.
+  Dimension dimRank = stt.getDimRank();
+  dimSizesValues.clear();
+  dimSizesValues.reserve(dimRank);
+  for (const Size sz : stt.getDimShape()) {
+    const auto s = ShapedType::isDynamic(sz) ? 0 : sz;
+    dimSizesValues.push_back(constantIndex(builder, loc, s));
+  }
+  Value dimShapesBuffer = allocaBuffer(builder, loc, dimSizesValues);
   // Create the `CheckedSparseTensorReader`. This reader performs a
   // consistency check on the static sizes, but accepts any size
   // of each dimension with a dynamic size.
@@ -679,18 +674,27 @@ Value sparse_tensor::genReader(OpBuilder &builder, Location loc,
         createFuncCall(builder, loc, "getSparseTensorReaderDimSizes", memTp,
                        reader, EmitCInterface::On)
             .getResult(0);
+    // Also convert the dim shape values into dim size values, just in case
+    // subsequent clients need the values (DCE will remove them if unused).
+    for (Dimension d = 0; d < dimRank; d++) {
+      if (stt.isDynamicDim(d))
+        dimSizesValues[d] = builder.create<memref::LoadOp>(
+            loc, dimSizesBuffer, constantIndex(builder, loc, d));
+    }
   }
   return reader;
 }
 
-Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
-                                   SparseTensorType stt,
-                                   ArrayRef<Value> dimShapesValues,
-                                   Value dimSizesBuffer,
-                                   /*out*/ Value &dim2lvlBuffer,
-                                   /*out*/ Value &lvl2dimBuffer) {
+Value sparse_tensor::genMapBuffers(
+    OpBuilder &builder, Location loc, SparseTensorType stt,
+    ArrayRef<Value> dimSizesValues, Value dimSizesBuffer,
+    /*out*/ SmallVectorImpl<Value> &lvlSizesValues,
+    /*out*/ Value &dim2lvlBuffer,
+    /*out*/ Value &lvl2dimBuffer) {
   const Dimension dimRank = stt.getDimRank();
   const Level lvlRank = stt.getLvlRank();
+  lvlSizesValues.clear();
+  lvlSizesValues.reserve(lvlRank);
   // For an identity mapping, the dim2lvl and lvl2dim mappings are
   // identical as are dimSizes and lvlSizes, so buffers are reused
   // as much as possible.
@@ -698,10 +702,12 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
     assert(dimRank == lvlRank);
     SmallVector<Value> iotaValues;
     iotaValues.reserve(lvlRank);
-    for (Level l = 0; l < lvlRank; l++)
+    for (Level l = 0; l < lvlRank; l++) {
       iotaValues.push_back(constantIndex(builder, loc, l));
+      lvlSizesValues.push_back(dimSizesValues[l]);
+    }
     dim2lvlBuffer = lvl2dimBuffer = allocaBuffer(builder, loc, iotaValues);
-    return dimSizesBuffer;
+    return dimSizesBuffer; // now lvlSizesBuffer
   }
   // Otherwise, some code needs to be generated to set up the buffers.
   // This code deals with permutations as well as non-permutations that
@@ -710,7 +716,6 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
   const auto lvlToDim = stt.getLvlToDim();
   SmallVector<Value> dim2lvlValues(lvlRank); // for each lvl, expr in dim vars
   SmallVector<Value> lvl2dimValues(dimRank); // for each dim, expr in lvl vars
-  SmallVector<Value> lvlSizesValues(lvlRank);
   // Generate dim2lvl.
   assert(lvlRank == dimToLvl.getNumResults());
   for (Level l = 0; l < lvlRank; l++) {
@@ -723,19 +728,19 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
     uint64_t cf = 0, cm = 0;
     switch (exp.getKind()) {
     case AffineExprKind::DimId: {
-      d = cast<AffineDimExpr>(exp).getPosition();
+      d = exp.cast<AffineDimExpr>().getPosition();
       break;
     }
     case AffineExprKind::FloorDiv: {
-      auto floor = cast<AffineBinaryOpExpr>(exp);
-      d = cast<AffineDimExpr>(floor.getLHS()).getPosition();
-      cf = cast<AffineConstantExpr>(floor.getRHS()).getValue();
+      auto floor = exp.cast<AffineBinaryOpExpr>();
+      d = floor.getLHS().cast<AffineDimExpr>().getPosition();
+      cf = floor.getRHS().cast<AffineConstantExpr>().getValue();
       break;
     }
     case AffineExprKind::Mod: {
-      auto mod = cast<AffineBinaryOpExpr>(exp);
-      d = cast<AffineDimExpr>(mod.getLHS()).getPosition();
-      cm = cast<AffineConstantExpr>(mod.getRHS()).getValue();
+      auto mod = exp.cast<AffineBinaryOpExpr>();
+      d = mod.getLHS().cast<AffineDimExpr>().getPosition();
+      cm = mod.getRHS().cast<AffineConstantExpr>().getValue();
       break;
     }
     default:
@@ -748,17 +753,14 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
     //    (3) l = d % c    : c
     Value lvlSz;
     if (cm == 0) {
-      lvlSz = dimShapesValues[d];
-      if (stt.isDynamicDim(d))
-        lvlSz = builder.create<memref::LoadOp>(loc, dimSizesBuffer,
-                                               constantIndex(builder, loc, d));
+      lvlSz = dimSizesValues[d];
       if (cf != 0)
         lvlSz = builder.create<arith::DivUIOp>(loc, lvlSz,
                                                constantIndex(builder, loc, cf));
     } else {
       lvlSz = constantIndex(builder, loc, cm);
     }
-    lvlSizesValues[l] = lvlSz;
+    lvlSizesValues.push_back(lvlSz);
   }
   // Generate lvl2dim.
   assert(dimRank == lvlToDim.getNumResults());
@@ -771,17 +773,17 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
     uint64_t c = 0;
     switch (exp.getKind()) {
     case AffineExprKind::DimId: {
-      l = cast<AffineDimExpr>(exp).getPosition();
+      l = exp.cast<AffineDimExpr>().getPosition();
       break;
     }
     case AffineExprKind::Add: {
       // Always mul on lhs, symbol/constant on rhs.
-      auto add = cast<AffineBinaryOpExpr>(exp);
+      auto add = exp.cast<AffineBinaryOpExpr>();
       assert(add.getLHS().getKind() == AffineExprKind::Mul);
-      auto mul = cast<AffineBinaryOpExpr>(add.getLHS());
-      ll = cast<AffineDimExpr>(mul.getLHS()).getPosition();
-      c = cast<AffineConstantExpr>(mul.getRHS()).getValue();
-      l = cast<AffineDimExpr>(add.getRHS()).getPosition();
+      auto mul = add.getLHS().cast<AffineBinaryOpExpr>();
+      ll = mul.getLHS().cast<AffineDimExpr>().getPosition();
+      c = mul.getRHS().cast<AffineConstantExpr>().getValue();
+      l = add.getRHS().cast<AffineDimExpr>().getPosition();
       break;
     }
     default:
@@ -792,5 +794,5 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
   // Return buffers.
   dim2lvlBuffer = allocaBuffer(builder, loc, dim2lvlValues);
   lvl2dimBuffer = allocaBuffer(builder, loc, lvl2dimValues);
-  return allocaBuffer(builder, loc, lvlSizesValues);
+  return allocaBuffer(builder, loc, lvlSizesValues); // lvlSizesBuffer
 }
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index d3b0889b71b514c..0e871d8e10aadf9 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -317,20 +317,16 @@ Value createOrFoldSliceOffsetOp(OpBuilder &builder, Location loc, Value tensor,
 Value createOrFoldSliceStrideOp(OpBuilder &builder, Location loc, Value tensor,
                                 Dimension dim);
 
-/// Populates the array with the dimension-shape of the given
-/// `SparseTensorType`, where dynamic sizes are represented by zero.
-void fillDimShape(OpBuilder &builder, Location loc, SparseTensorType stt,
-                  SmallVectorImpl<Value> &out);
-
 /// Generates code that opens a reader and sets the dimension sizes.
 Value genReader(OpBuilder &builder, Location loc, SparseTensorType stt,
                 Value tensor,
-                /*out*/ SmallVectorImpl<Value> &dimShapeValues,
+                /*out*/ SmallVectorImpl<Value> &dimSizesValues,
                 /*out*/ Value &dimSizesBuffer);
 
 /// Generates code to set up the buffer parameters for a map.
 Value genMapBuffers(OpBuilder &builder, Location loc, SparseTensorType stt,
-                    ArrayRef<Value> dimShapeValues, Value dimSizesBuffer,
+                    ArrayRef<Value> dimSizesValues, Value dimSizesBuffer,
+                    /*out*/ SmallVectorImpl<Value> &lvlSizesValues,
                     /*out*/ Value &dim2lvlBuffer,
                     /*out*/ Value &lvl2dimBuffer);
 
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 888f513be2e4dc7..cfc8eb19918b77e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1484,11 +1484,12 @@ struct SparseNewConverter : public OpConversionPattern<NewOp> {
     createAllocFields(rewriter, loc, dstTp, dynSizes, /*enableInit=*/false,
                       fields, nse);
 
-    // Now construct the dim2lvl and lvl2dim buffers.
+    // Now construct the lvl sizes and the dim2lvl/lvl2dim buffers.
+    SmallVector<Value> lvlSizesValues;
     Value dim2lvlBuffer;
     Value lvl2dimBuffer;
     genMapBuffers(rewriter, loc, dstTp, dimShapesValues, dimSizesBuffer,
-                  dim2lvlBuffer, lvl2dimBuffer);
+                  lvlSizesValues, dim2lvlBuffer, lvl2dimBuffer);
 
     // Read the COO tensor data.
     MutSparseTensorDescriptor desc(dstTp, fields);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index e629133171e15dc..f8c7aba455c0f11 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -199,9 +199,10 @@ class NewCallParams final {
     params[kParamDimSizes] = dimSizesBuffer
                                  ? dimSizesBuffer
                                  : allocaBuffer(builder, loc, dimSizesValues);
-    params[kParamLvlSizes] =
-        genMapBuffers(builder, loc, stt, dimSizesValues, params[kParamDimSizes],
-                      params[kParamDim2Lvl], params[kParamLvl2Dim]);
+    SmallVector<Value> lvlSizesValues; // unused
+    params[kParamLvlSizes] = genMapBuffers(
+        builder, loc, stt, dimSizesValues, params[kParamDimSizes],
+        lvlSizesValues, params[kParamDim2Lvl], params[kParamLvl2Dim]);
     // Secondary and primary types encoding.
     const auto enc = stt.getEncoding();
     params[kParamPosTp] = constantPosTypeEncoding(builder, loc, enc);
@@ -369,13 +370,13 @@ class SparseTensorNewConverter : public OpConversionPattern<NewOp> {
     if (!stt.hasEncoding())
       return failure();
     // Construct the `reader` opening method calls.
-    SmallVector<Value> dimShapesValues;
+    SmallVector<Value> dimSizesValues;
     Value dimSizesBuffer;
     Value reader = genReader(rewriter, loc, stt, adaptor.getOperands()[0],
-                             dimShapesValues, dimSizesBuffer);
+                             dimSizesValues, dimSizesBuffer);
     // Use the `reader` to parse the file.
     Value tensor = NewCallParams(rewriter, loc)
-                       .genBuffers(stt, dimShapesValues, dimSizesBuffer)
+                       .genBuffers(stt, dimSizesValues, dimSizesBuffer)
                        .genNewCall(Action::kFromReader, reader);
     // Free the memory for `reader`.
     createFuncCall(rewriter, loc, "delSparseTensorReader", {}, {reader},
@@ -402,11 +403,11 @@ class SparseTensorAllocConverter
     // Gather all dimension sizes as SSA values.
     Location loc = op.getLoc();
     const Dimension dimRank = stt.getDimRank();
-    SmallVector<Value> dimSizes;
-    dimSizes.reserve(dimRank);
+    SmallVector<Value> dimSizesValues;
+    dimSizesValues.reserve(dimRank);
     unsigned operandCtr = 0;
     for (Dimension d = 0; d < dimRank; d++) {
-      dimSizes.push_back(
+      dimSizesValues.push_back(
           stt.isDynamicDim(d)
               ? adaptor.getOperands()[operandCtr++]
               : constantIndex(rewriter, loc, op.getStaticSize(d)));
@@ -414,7 +415,7 @@ class SparseTensorAllocConverter
     // Generate the call to construct empty tensor. The sizes are
     // explicitly defined by the arguments to the alloc operator.
     rewriter.replaceOp(op, NewCallParams(rewriter, loc)
-                               .genBuffers(stt, dimSizes)
+                               .genBuffers(stt, dimSizesValues)
                                .genNewCall(Action::kEmpty));
     return success();
   }
@@ -433,19 +434,19 @@ class SparseTensorEmptyConverter : public OpConversionPattern<tensor::EmptyOp> {
       return failure();
     // Gather all dimension sizes as SSA values.
     const Dimension dimRank = stt.getDimRank();
-    SmallVector<Value> dimSizes;
-    dimSizes.reserve(dimRank);
+    SmallVector<Value> dimSizesValues;
+    dimSizesValues.reserve(dimRank);
     auto shape = op.getType().getShape();
     unsigned operandCtr = 0;
     for (Dimension d = 0; d < dimRank; d++) {
-      dimSizes.push_back(stt.isDynamicDim(d)
-                             ? adaptor.getOperands()[operandCtr++]
-                             : constantIndex(rewriter, loc, shape[d]));
+      dimSizesValues.push_back(stt.isDynamicDim(d)
+                                   ? adaptor.getOperands()[operandCtr++]
+                                   : constantIndex(rewriter, loc, shape[d]));
     }
     // Generate the call to construct empty tensor. The sizes are
     // explicitly defined by the arguments to the alloc operator.
     rewriter.replaceOp(op, NewCallParams(rewriter, loc)
-                               .genBuffers(stt, dimSizes)
+                               .genBuffers(stt, dimSizesValues)
                                .genNewCall(Action::kEmpty));
     return success();
   }
@@ -467,8 +468,8 @@ class SparseTensorReorderCOOConverter
     const Value src = adaptor.getInputCoo();
 
     NewCallParams params(rewriter, loc);
-    SmallVector<Value> dimSizes = getDimSizes(rewriter, loc, srcTp, src);
-    rewriter.replaceOp(op, params.genBuffers(dstTp, dimSizes)
+    SmallVector<Value> dimSizesValues = getDimSizes(rewriter, loc, srcTp, src);
+    rewriter.replaceOp(op, params.genBuffers(dstTp, dimSizesValues)
                                .genNewCall(Action::kSortCOOInPlace, src));
 
     return success();
@@ -706,14 +707,14 @@ class SparseTensorAssembleConverter : public OpConversionPattern<AssembleOp> {
     const Location loc = op->getLoc();
     const auto dstTp = getSparseTensorType(op.getResult());
     assert(dstTp.hasStaticDimShape());
-    SmallVector<Value> dimSizes = getDimSizes(rewriter, loc, dstTp);
+    SmallVector<Value> dimSizesValues = getDimSizes(rewriter, loc, dstTp);
     // Use a library method to transfer the external buffers from
     // clients to the internal SparseTensorStorage. Since we cannot
     // assume clients transfer ownership of the buffers, this method
     // will copy all data over into a new SparseTensorStorage.
     Value dst =
         NewCallParams(rewriter, loc)
-            .genBuffers(dstTp.withoutDimToLvl(), dimSizes)
+            .genBuffers(dstTp.withoutDimToLvl(), dimSizesValues)
             .genNewCall(Action::kPack,
                         genLvlPtrsBuffers(rewriter, loc, adaptor.getLevels(),
                                           adaptor.getValues()));
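
As a worked example of the lvl-size rules in genMapBuffers (l = d,
l = d floordiv c, l = d mod c), take a hypothetical 6x8 matrix stored as
BSR with 2x2 blocks, i.e. dimToLvl = (i, j) -> (i floordiv 2, j floordiv 2,
i mod 2, j mod 2): the level sizes come out as 6/2 = 3, 8/2 = 4, 2, and 2,
where each floordiv case emits an arith.divui on the (possibly dynamic)
dimension size and each mod case becomes an index constant.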


