[Mlir-commits] [mlir] [mlir][sparse] optimize memory load to SSA value when generating spar… (PR #74750)

llvmlistbot at llvm.org
Thu Dec 7 11:13:31 PST 2023


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-mlir-sparse

Author: Peiming Liu (PeimingLiu)

<details>
<summary>Changes</summary>

…se conv kernel.

---

Patch is 48.58 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/74750.diff


3 Files Affected:

- (modified) mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp (+16-23) 
- (modified) mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h (+6-20) 
- (modified) mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir (+186-204) 

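At a high level, the patch threads the number of position tuples through `SliceInfo` as an SSA value (`posTupleNum`) instead of writing it to slot 0 of the scratch position buffer and re-loading it whenever it is needed as a loop bound. A minimal hand-written MLIR sketch of the effect on the emitted loop follows; the function names, shapes, and constants are illustrative only and are not taken from the PR:

```mlir
// Before (sketch): the tuple count takes a round trip through the scratch
// buffer, so the scf.for upper bound comes from a memref.load.
func.func @tuple_count_before(%buf: memref<5xindex>) -> index {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  memref.store %c1, %buf[%c0] : memref<5xindex>  // write the count ...
  %n = memref.load %buf[%c0] : memref<5xindex>   // ... and reload it
  %sum = scf.for %i = %c0 to %n step %c1
      iter_args(%acc = %c0) -> (index) {
    %next = arith.addi %acc, %c1 : index
    scf.yield %next : index
  }
  return %sum : index
}

// After (sketch): the count stays an SSA value (here simply %c1) and feeds
// the scf.for bound directly; the store/load pair disappears.
func.func @tuple_count_after() -> index {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %sum = scf.for %i = %c0 to %c1 step %c1
      iter_args(%acc = %c0) -> (index) {
    %next = arith.addi %acc, %c1 : index
    scf.yield %next : index
  }
  return %sum : index
}
```

In the actual kernel this count is the `mSz` bound of the position-tuple loops in `LoopEmitter.cpp` (see the hunks below), which previously came from `loadSlicePosTupleNum` and now reads `frontSlice.posTupleNum` or `info.posTupleNum` directly.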

``````````diff
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
index ff8561534a376..d35d6f6a5b5a5 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -167,14 +167,6 @@ static void updateSlicePosPtr(OpBuilder &builder, Location loc, Value sPosBuf,
                               Value pPtr) {
   builder.create<memref::StoreOp>(loc, pPtr, sPosBuf, C_IDX(1));
 }
-static Value loadSlicePosTupleNum(OpBuilder &builder, Location loc,
-                                  Value sPosBuf) {
-  return genIndexLoad(builder, loc, sPosBuf, C_IDX(0));
-}
-static void updateSlicePosTupleNum(OpBuilder &builder, Location loc, Value num,
-                                   Value sPosBuf) {
-  builder.create<memref::StoreOp>(loc, num, sPosBuf, C_IDX(0));
-}
 
 // Gets and sets position values for slice-driven loops.
 enum class SlicePosKind { kLo, kHi, kNext };
@@ -405,7 +397,7 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
     sliceMeta[tid].assign(lvlRank, std::vector<std::pair<Value, unsigned>>());
     sliceStack[tid].emplace_back(/*minCrd=*/Value(),
                                  /*offset=*/Value(), /*isNonEmpty*/ Value(),
-                                 std::nullopt, 0);
+                                 /*posTupleNum=*/Value(), std::nullopt, 0);
     if (dimGetter && !isSynTensor(tid)) {
       for (Level l = 0; l < lvlRank; l++) {
         dependentLvlMap[tid][l] = dimGetter(tid, l);
@@ -1797,7 +1789,7 @@ ValueRange LoopEmitter::genUnResolvedSliceTreeTraverse(
         unsigned depth = frontSlice.depth - 1;
         Value offset = frontSlice.offset;
         Value sPtrBuf = slicePosBuffer[tid][firstLvl][depth];
-        Value mSz = loadSlicePosTupleNum(builder, loc, sPtrBuf);
+        Value mSz = frontSlice.posTupleNum;
         outerMost = builder.create<scf::ForOp>(
             loc, c0, mSz, c1, innerArgs,
             [this, tid, firstLvl, offset, sPtrBuf, &ip, &pos,
@@ -1908,7 +1900,7 @@ void LoopEmitter::genResolvedSliceBegin(OpBuilder &builder, Location loc,
     // Dense slice begin is trivial.
     sliceStack[tid].emplace_back(/*minCoord=*/c0, /*offset=*/c0,
                                  /*nonEmpty=*/constantI1(builder, loc, true),
-                                 lvl, /*depth=*/1);
+                                 c0, lvl, /*depth=*/1);
     return;
   }
   auto [nxSz, stride] = sliceMeta[tid][lvl][1];
@@ -1924,12 +1916,13 @@ void LoopEmitter::genResolvedSliceBegin(OpBuilder &builder, Location loc,
     pHi = genIndexLoad(builder, loc, positionsBuffers[tid][lvl],
                        ADDI(posits[tid][lvl - 1], c1));
   }
-  // Fills out pIdxBuffer[tid][lvl][0] with [/*memSize =*/4, 0, pLo, pHi]
-  updateSlicePosTupleNum(builder, loc, c1, sPtrBuf);
+  // Fills out pIdxBuffer[tid][lvl][0] with [0, pLo, pHi]
   updateSlicePosPtr(builder, loc, sPtrBuf, c0);
   updateSlicePos(builder, loc, sPtrBuf, pLo, c0, SlicePosKind::kLo);
   updateSlicePos(builder, loc, sPtrBuf, pHi, c0, SlicePosKind::kHi);
-
+  // Slicing over a resolved parent: we only need one pair of pos lo and hi
+  // to specify the current slice.
+  Value tupleNum = c1;
   // This is a non-empty tensor if pLo < pHi.
   Value isNonEmpty = CMPI(ult, pLo, pHi);
   // The minimal coord must be at the first position on an ordered level.
@@ -1941,7 +1934,7 @@ void LoopEmitter::genResolvedSliceBegin(OpBuilder &builder, Location loc,
 
   // FIXME: We need the relative offset related to the base slice.
   Value absOffset = offsetFromMinCoord(builder, loc, minCrd, nxSz, isNonEmpty);
-  sliceStack[tid].emplace_back(minCrd, absOffset, isNonEmpty, lvl,
+  sliceStack[tid].emplace_back(minCrd, absOffset, isNonEmpty, tupleNum, lvl,
                                /*depth=*/1);
 }
 
@@ -1973,8 +1966,8 @@ void LoopEmitter::genUnResolvedSliceBegin(OpBuilder &builder, Location loc,
   Value remSz = sliceMeta[tid][lvl][depth + 1].first;
   // Dense slice begin is trivial
   if (isDenseLT(lvlTypes[tid][lvl])) {
-    sliceStack[tid].emplace_back(c0, c0, constantI1(builder, loc, false), lvl,
-                                 depth + 1);
+    sliceStack[tid].emplace_back(c0, c0, constantI1(builder, loc, false), c0,
+                                 lvl, depth + 1);
     return;
   }
 
@@ -2064,11 +2057,11 @@ void LoopEmitter::genUnResolvedSliceBegin(OpBuilder &builder, Location loc,
   Value minCrd = result[1];
   // Two metadata [memSize, idx].
   // TODO: Can use an SSA value for these two metadata
-  updateSlicePosTupleNum(builder, loc, result[2], sPtrBuf);
   updateSlicePosPtr(builder, loc, sPtrBuf, c0);
   // FIXME: we need the relative offset related to the base slice.
   Value absOffset = offsetFromMinCoord(builder, loc, minCrd, remSz, isNonEmpty);
-  sliceStack[tid].emplace_back(minCrd, absOffset, isNonEmpty, lvl, depth + 1);
+  sliceStack[tid].emplace_back(minCrd, absOffset, isNonEmpty, result[2], lvl,
+                               depth + 1);
 }
 
 bool LoopEmitter::genSliceBegin(OpBuilder &builder, Location loc, TensorId tid,
@@ -2212,10 +2205,10 @@ LoopEmitter::genSliceNextInduction(OpBuilder &builder, Location loc,
     //    offset = minCrd - size + 1;
     // }
     builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
-    reduc[2] = absOffset;                                    // restore value.
-    Value mSz = loadSlicePosTupleNum(builder, loc, sPtrBuf); // memSize
-    reduc[0] = lvlSizes[tid][lvl];                           // next min coord
-    reduc[1] = constantI1(builder, loc, false);              // isNonEmpty
+    reduc[2] = absOffset;                       // restore value.
+    Value mSz = info.posTupleNum;               // tuple number.
+    reduc[0] = lvlSizes[tid][lvl];              // next min coord
+    reduc[1] = constantI1(builder, loc, false); // isNonEmpty
     auto loopArgs = static_cast<ValueRange>(reduc).drop_back();
     auto forOp = scf::buildLoopNest(
         builder, loc, c0, mSz, c1, loopArgs,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
index 0bdd9d45e6ae6..5e51cb2110fa1 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
@@ -298,10 +298,10 @@ class LoopEmitter {
   struct SliceInfo final {
     // Note that we do not need to create an actual sparse tensor slice but
     // instead only need to maintain the metadata of the slice.
-    SliceInfo(Value minCrd, Value offset, Value isNonEmpty,
+    SliceInfo(Value minCrd, Value offset, Value isNonEmpty, Value posTupleNum,
               std::optional<Level> slicedOnLvl, unsigned depth)
         : minCrd(minCrd), offset(offset), isNonEmpty(isNonEmpty),
-          slicedOnLvl(slicedOnLvl), depth(depth) {
+          posTupleNum(posTupleNum), slicedOnLvl(slicedOnLvl), depth(depth) {
       // TODO: use std::optional<pair<Level, minCrd>>
       assert(!slicedOnLvl || minCrd);
     }
@@ -309,9 +309,10 @@ class LoopEmitter {
     // Whether this is the tensor that has not yet been sliced.
     bool isInitialTensor() const { return !slicedOnLvl.has_value(); }
 
-    Value minCrd;     // the minimum coordinate of the slice.
-    Value offset;     // the *absolute* offset of the current slice.
-    Value isNonEmpty; // whether the slice is empty.
+    Value minCrd;      // the minimum coordinate of the slice.
+    Value offset;      // the *absolute* offset of the current slice.
+    Value isNonEmpty;  // whether the slice is non-empty.
+    Value posTupleNum; // the number of position tuples used in the slice.
     std::optional<Level> slicedOnLvl; // the level on which the slice is done
     unsigned depth; // the depth (relative to dependentDimMap[tid][lvl]).
   };
@@ -650,17 +651,6 @@ class LoopEmitter {
   std::vector<std::vector<LevelType>> lvlTypes;
   // Sparse iteration information for each `(TensorId, Level)` pair.
   // These arrays are updated to remain current within the current loop.
-  // TODO: Clarify which of these are indexed by dstLvl vs srcLvl.
-  //
-  /// The collection of positions for a given element (one such collection
-  /// for each tensor).  This is the position analogue of the "coords"
-  /// naming convention.
-  ///
-  /// FIXME: [CLARIFY_POSITS_LVL] It's unclear which levels are used
-  /// to index the `posits` array.  On the one hand `genSparseCrd`
-  /// uses dstLvl; on the other hand `enterLoopOverTensorAtLvl`,
-  /// `prepareLoopOverTensorAtLvl`, and `enterCoIterationOverTensorsAtLvls`
-  /// uses srcLvl.  So which is it?
   std::vector<std::vector<Value>> posits;
   /// The collection of coordinates for a given element (one such
   /// collection for each tensor).
@@ -704,10 +694,6 @@ class LoopEmitter {
   // sliceStack[tid] holds the generated slice stack on tid.
   std::vector<std::vector<SliceInfo>> sliceStack;
 
-  /// TODO: not yet used, it should track the current level for each tensor
-  /// to help eliminate `lvls` paramters from above APIs.
-  /// std::vector<Level> curLvl;
-
   //
   // Fields which have at most `numLoops` many entries.
   //
diff --git a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
index 0f99a0206e4cb..02cc5d1e2ef34 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
@@ -8,8 +8,8 @@
 
 
 // CHECK-LABEL:   func.func @conv2d_all_sparse_CSR(
-// CHECK-SAME:      %[[VAL_0:.*]]: tensor<8x8xi32, #sparse{{[0-9]*}}>,
-// CHECK-SAME:      %[[VAL_1:.*]]: tensor<3x3xi32>) -> tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK-SAME:      %[[VAL_0:.*]]: tensor<8x8xi32, #sparse>,
+// CHECK-SAME:      %[[VAL_1:.*]]: tensor<3x3xi32>) -> tensor<6x6xi32, #sparse> {
 // CHECK-DAG:       %[[VAL_2:.*]] = arith.constant true
 // CHECK-DAG:       %[[VAL_3:.*]] = arith.constant -2 : index
 // CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 4 : index
@@ -21,250 +21,232 @@
 // CHECK-DAG:       %[[VAL_10:.*]] = arith.constant 0 : index
 // CHECK-DAG:       %[[VAL_11:.*]] = arith.constant 0 : i32
 // CHECK-DAG:       %[[VAL_12:.*]] = arith.constant false
-// CHECK-DAG:       %[[VAL_13:.*]] = tensor.empty() : tensor<6x6xi32, #sparse{{[0-9]*}}>
-// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xindex>
-// CHECK-DAG:       %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xindex>
-// CHECK-DAG:       %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xindex>
-// CHECK-DAG:       %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xindex>
-// CHECK-DAG:       %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xi32>
-// CHECK-DAG:       %[[VAL_19:.*]] = memref.alloca() : memref<11xindex>
-// CHECK-DAG:       %[[VAL_20:.*]] = memref.alloca() : memref<5xindex>
-// CHECK-DAG:       %[[VAL_21:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK-DAG:       memref.store %[[VAL_7]], %[[VAL_20]]{{\[}}%[[VAL_10]]] : memref<5xindex>
-// CHECK-DAG:       memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_7]]] : memref<5xindex>
-// CHECK-DAG:       memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_9]]] : memref<5xindex>
-// CHECK-DAG:       memref.store %[[VAL_21]], %[[VAL_20]]{{\[}}%[[VAL_6]]] : memref<5xindex>
+// CHECK-DAG:       %[[VAL_13:.*]] = tensor.empty() : tensor<6x6xi32, #sparse>
+// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xi32, #sparse> to memref<?xi32>
+// CHECK:           %[[VAL_19:.*]] = memref.alloca() : memref<11xindex>
+// CHECK:           %[[VAL_20:.*]] = memref.alloca() : memref<5xindex>
+// CHECK:           %[[VAL_21:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_7]]] : memref<?xindex>
+// CHECK:           memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_7]]] : memref<5xindex>
+// CHECK:           memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_9]]] : memref<5xindex>
+// CHECK:           memref.store %[[VAL_21]], %[[VAL_20]]{{\[}}%[[VAL_6]]] : memref<5xindex>
 // CHECK:           %[[VAL_22:.*]] = arith.cmpi ugt, %[[VAL_21]], %[[VAL_10]] : index
 // CHECK:           %[[VAL_23:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_10]]] : memref<?xindex>
 // CHECK:           %[[VAL_24:.*]] = arith.cmpi uge, %[[VAL_23]], %[[VAL_6]] : index
 // CHECK:           %[[VAL_25:.*]] = arith.andi %[[VAL_22]], %[[VAL_24]] : i1
 // CHECK:           %[[VAL_26:.*]] = arith.addi %[[VAL_23]], %[[VAL_3]] : index
 // CHECK:           %[[VAL_27:.*]] = arith.select %[[VAL_25]], %[[VAL_26]], %[[VAL_10]] : index
-// CHECK:           %[[VAL_28:.*]]:3 = scf.while (%[[VAL_29:.*]] = %[[VAL_22]], %[[VAL_30:.*]] = %[[VAL_23]], %[[VAL_31:.*]] = %[[VAL_27]], %[[VAL_32:.*]] = %[[VAL_13]]) : (i1, index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>) -> (index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>) {
-// CHECK:             scf.condition(%[[VAL_29]]) %[[VAL_30]], %[[VAL_31]], %[[VAL_32]] : index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK:           %[[VAL_28:.*]]:3 = scf.while (%[[VAL_29:.*]] = %[[VAL_22]], %[[VAL_30:.*]] = %[[VAL_23]], %[[VAL_31:.*]] = %[[VAL_27]], %[[VAL_32:.*]] = %[[VAL_13]]) : (i1, index, index, tensor<6x6xi32, #sparse>) -> (index, index, tensor<6x6xi32, #sparse>) {
+// CHECK:             scf.condition(%[[VAL_29]]) %[[VAL_30]], %[[VAL_31]], %[[VAL_32]] : index, index, tensor<6x6xi32, #sparse>
 // CHECK:           } do {
-// CHECK:           ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: tensor<6x6xi32, #sparse{{[0-9]*}}>):
-// CHECK:             %[[VAL_36:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_10]]] : memref<5xindex>
-// CHECK:             %[[VAL_37:.*]]:4 = scf.for %[[VAL_38:.*]] = %[[VAL_10]] to %[[VAL_36]] step %[[VAL_7]] iter_args(%[[VAL_39:.*]] = %[[VAL_12]], %[[VAL_40:.*]] = %[[VAL_5]], %[[VAL_41:.*]] = %[[VAL_10]], %[[VAL_42:.*]] = %[[VAL_10]]) -> (i1, index, index, index) {
-// CHECK:               %[[VAL_43:.*]] = arith.addi %[[VAL_38]], %[[VAL_9]] : index
-// CHECK:               %[[VAL_44:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_43]]] : memref<5xindex>
-// CHECK:               %[[VAL_45:.*]] = arith.addi %[[VAL_38]], %[[VAL_6]] : index
-// CHECK:               %[[VAL_46:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_45]]] : memref<5xindex>
-// CHECK:               %[[VAL_47:.*]] = arith.addi %[[VAL_38]], %[[VAL_4]] : index
-// CHECK:               memref.store %[[VAL_42]], %[[VAL_20]]{{\[}}%[[VAL_47]]] : memref<5xindex>
-// CHECK:               %[[VAL_48:.*]] = arith.addi %[[VAL_34]], %[[VAL_6]] : index
-// CHECK:               %[[VAL_49:.*]]:5 = scf.while (%[[VAL_50:.*]] = %[[VAL_44]], %[[VAL_51:.*]] = %[[VAL_39]], %[[VAL_52:.*]] = %[[VAL_40]], %[[VAL_53:.*]] = %[[VAL_41]], %[[VAL_54:.*]] = %[[VAL_42]]) : (index, i1, index, index, index) -> (index, i1, index, index, index) {
-// CHECK:                 %[[VAL_55:.*]] = arith.cmpi ult, %[[VAL_50]], %[[VAL_46]] : index
-// CHECK:                 %[[VAL_56:.*]] = scf.if %[[VAL_55]] -> (i1) {
-// CHECK:                   %[[VAL_57:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_50]]] : memref<?xindex>
-// CHECK:                   %[[VAL_58:.*]] = arith.cmpi ult, %[[VAL_57]], %[[VAL_48]] : index
-// CHECK:                   scf.yield %[[VAL_58]] : i1
-// CHECK:                 } else {
-// CHECK:                   scf.yield %[[VAL_12]] : i1
-// CHECK:                 }
-// CHECK:                 scf.condition(%[[VAL_56]]) %[[VAL_50]], %[[VAL_51]], %[[VAL_52]], %[[VAL_53]], %[[VAL_54]] : index, i1, index, index, index
-// CHECK:               } do {
-// CHECK:               ^bb0(%[[VAL_59:.*]]: index, %[[VAL_60:.*]]: i1, %[[VAL_61:.*]]: index, %[[VAL_62:.*]]: index, %[[VAL_63:.*]]: index):
-// CHECK:                 %[[VAL_64:.*]] = arith.addi %[[VAL_59]], %[[VAL_7]] : index
-// CHECK:                 %[[VAL_65:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_59]]] : memref<?xindex>
-// CHECK:                 %[[VAL_66:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_64]]] : memref<?xindex>
-// CHECK:                 %[[VAL_67:.*]] = arith.cmpi ult, %[[VAL_65]], %[[VAL_66]] : index
-// CHECK:                 %[[VAL_68:.*]] = arith.ori %[[VAL_67]], %[[VAL_60]] : i1
-// CHECK:                 %[[VAL_69:.*]] = scf.if %[[VAL_67]] -> (index) {
-// CHECK:                   %[[VAL_70:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_65]]] : memref<?xindex>
-// CHECK:                   %[[VAL_71:.*]] = arith.cmpi ult, %[[VAL_70]], %[[VAL_61]] : index
-// CHECK:                   %[[VAL_72:.*]] = arith.select %[[VAL_71]], %[[VAL_70]], %[[VAL_61]] : index
-// CHECK:                   scf.yield %[[VAL_72]] : index
-// CHECK:                 } else {
-// CHECK:                   scf.yield %[[VAL_61]] : index
-// CHECK:                 }
-// CHECK:                 %[[VAL_73:.*]] = arith.addi %[[VAL_62]], %[[VAL_9]] : index
-// CHECK:                 memref.store %[[VAL_65]], %[[VAL_19]]{{\[}}%[[VAL_73]]] : memref<11xindex>
-// CHECK:                 %[[VAL_74:.*]] = arith.addi %[[VAL_62]], %[[VAL_8]] : index
-// CHECK:                 memref.store %[[VAL_66]], %[[VAL_19]]{{\[}}%[[VAL_74]]] : memref<11xindex>
-// CHECK:                 %[[VAL_75:.*]] = arith.addi %[[VAL_62]], %[[VAL_7]] : index
-// CHECK:                 %[[VAL_76:.*]] = arith.addi %[[VAL_63]], %[[VAL_7]] : index
-// CHECK:                 scf.yield %[[VAL_64]], %[[VAL_68]], %[[VAL_69]], %[[VAL_75]], %[[VAL_76]] : index, i1, index, index, index
+// CHECK:           ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: tensor<6x6xi32, #sparse>):
+// CHECK:             %[[VAL_36:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_9]]] : memref<5xindex>
+// CHECK:             %[[VAL_37:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_6]]] : memref<5xindex>
+// CHECK:             memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_4]]] : memref<5xindex>
+// CHECK:             %[[VAL_38:.*]] = arith.addi %[[VAL_34]], %[[VAL_6]] : index
+// CHECK:             %[[VAL_39:.*]]:5 = scf.while (%[[VAL_40:.*]] = %[[VAL_36]], %[[VAL_41:.*]] = %[[VAL_12]], %[[VAL_42:.*]] = %[[VAL_5]], %[[VAL_43:.*]] = %[[VAL_10]], %[[VAL_44:.*]] = %[[VAL_10]]) : (index, i1, index, index, index) -> (index, i1, index, index, index) {
+// CHECK:               %[[VAL_45:.*]] = arith.cmpi ult, %[[VAL_40]], %[[VAL_37]] : index
+// CHECK:               %[[VAL_46:.*]] = scf.if %[[VAL_45]] -> (i1) {
+// CHECK:                 %[[VAL_47:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_40]]] : memref<?xindex>
+// CHECK:                 %[[VAL_48:.*]] = arith.cmpi ult, %[[VAL_47]], %[[VAL_38]] : index
+// CHECK:                 scf.yield %[[VAL_48]] : i1
+// CHECK:               } else {
+// CHECK:                 scf.yield %[[VAL_12]] : i1
 // CHECK:               }
-// CHECK:               scf.yield %[[VAL_77:.*]]#1, %[[VAL_77]]#2, %[[VAL_77]]#3, %[[VAL_77]]#4 : i1, index, index, index
+// CHECK:               scf.condition(%[[VAL_46]]) %[[VAL_40]], %[[VAL_41]], %[[VAL_42]], %[[VAL_43]], %[[VAL_44]] : index, i1, index, index, index
+// CHECK:             } do {
+// CHECK:             ^bb0(%[[VAL_49:.*]]: index, %[[VAL_50:.*]]: i1, %[[VAL_51:.*]]: index, %[[VAL_52:.*]]: index, %[[VAL_53:.*]]: index):
+// CHECK:               %[[VAL_54:.*]] = arith.addi %[[VAL_49]], %[[VAL_7]] : index
+// CHECK:               %[[VAL_55:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_49]]] : memref<?xindex>
+// CHECK:               %[[VAL_56:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_54]]] : memref<?xindex>
+// CHECK:               %[[VAL_57:.*]] = ar...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/74750

