[Mlir-commits] [mlir] 097d2f1 - [mlir][sparse] optimize memory load to SSA value when generating sparse conv kernel. (#74750)
llvmlistbot at llvm.org
Thu Dec 7 12:00:29 PST 2023
Author: Peiming Liu
Date: 2023-12-07T12:00:25-08:00
New Revision: 097d2f14173a3bfc1cd44f543f63154fed79e962
URL: https://github.com/llvm/llvm-project/commit/097d2f14173a3bfc1cd44f543f63154fed79e962
DIFF: https://github.com/llvm/llvm-project/commit/097d2f14173a3bfc1cd44f543f63154fed79e962.diff
LOG: [mlir][sparse] optimize memory load to SSA value when generating sparse conv kernel. (#74750)
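In short: the number of position tuples for a slice used to be re-read from slot 0 of the slice position buffer through the (now removed) loadSlicePosTupleNum/updateSlicePosTupleNum helpers; after this change it is computed once and carried as an SSA value in the new SliceInfo::posTupleNum field. Below is a minimal standalone C++ sketch of that caching pattern only; Value, PosBuffer, makeSlice, and tupleCount are simplified stand-ins for illustration, not the actual MLIR/LoopEmitter API.

#include <cstddef>
#include <vector>

using Value = std::size_t;            // stand-in for an SSA value
using PosBuffer = std::vector<Value>; // stand-in for the memref<?xindex> buffer

struct SliceInfo {
  Value minCrd;
  Value offset;
  bool isNonEmpty;
  Value posTupleNum; // new: number of position tuples used by the slice
};

// Old pattern: every consumer re-loads the count from memory (slot 0).
Value loadTupleNum(const PosBuffer &sPosBuf) { return sPosBuf[0]; }

// New pattern: read the count once when the slice metadata is built...
SliceInfo makeSlice(const PosBuffer &sPosBuf, Value minCrd, Value offset,
                    bool nonEmpty) {
  return {minCrd, offset, nonEmpty, /*posTupleNum=*/sPosBuf[0]};
}

// ...and later uses (such as a loop upper bound) read the cached value.
Value tupleCount(const SliceInfo &slice) { return slice.posTupleNum; }

This mirrors how genUnResolvedSliceTreeTraverse and genSliceNextInduction now take the tuple count from the SliceInfo (frontSlice.posTupleNum / info.posTupleNum) instead of emitting an extra memref load.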
Added:
Modified:
mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
index ff8561534a376..75121b5e3ce2e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -167,14 +167,6 @@ static void updateSlicePosPtr(OpBuilder &builder, Location loc, Value sPosBuf,
Value pPtr) {
builder.create<memref::StoreOp>(loc, pPtr, sPosBuf, C_IDX(1));
}
-static Value loadSlicePosTupleNum(OpBuilder &builder, Location loc,
- Value sPosBuf) {
- return genIndexLoad(builder, loc, sPosBuf, C_IDX(0));
-}
-static void updateSlicePosTupleNum(OpBuilder &builder, Location loc, Value num,
- Value sPosBuf) {
- builder.create<memref::StoreOp>(loc, num, sPosBuf, C_IDX(0));
-}
// Gets and sets position values for slice-driven loops.
enum class SlicePosKind { kLo, kHi, kNext };
@@ -405,7 +397,7 @@ void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
sliceMeta[tid].assign(lvlRank, std::vector<std::pair<Value, unsigned>>());
sliceStack[tid].emplace_back(/*minCrd=*/Value(),
/*offset=*/Value(), /*isNonEmpty*/ Value(),
- std::nullopt, 0);
+ /*posTupleNum=*/Value(), std::nullopt, 0);
if (dimGetter && !isSynTensor(tid)) {
for (Level l = 0; l < lvlRank; l++) {
dependentLvlMap[tid][l] = dimGetter(tid, l);
@@ -1797,7 +1789,7 @@ ValueRange LoopEmitter::genUnResolvedSliceTreeTraverse(
unsigned depth = frontSlice.depth - 1;
Value offset = frontSlice.offset;
Value sPtrBuf = slicePosBuffer[tid][firstLvl][depth];
- Value mSz = loadSlicePosTupleNum(builder, loc, sPtrBuf);
+ Value mSz = frontSlice.posTupleNum;
outerMost = builder.create<scf::ForOp>(
loc, c0, mSz, c1, innerArgs,
[this, tid, firstLvl, offset, sPtrBuf, &ip, &pos,
@@ -1908,7 +1900,7 @@ void LoopEmitter::genResolvedSliceBegin(OpBuilder &builder, Location loc,
// Dense slice begin is trivial.
sliceStack[tid].emplace_back(/*minCoord=*/c0, /*offset=*/c0,
/*nonEmpty=*/constantI1(builder, loc, true),
- lvl, /*depth=*/1);
+ c0, lvl, /*depth=*/1);
return;
}
auto [nxSz, stride] = sliceMeta[tid][lvl][1];
@@ -1924,12 +1916,13 @@ void LoopEmitter::genResolvedSliceBegin(OpBuilder &builder, Location loc,
pHi = genIndexLoad(builder, loc, positionsBuffers[tid][lvl],
ADDI(posits[tid][lvl - 1], c1));
}
- // Fills out pIdxBuffer[tid][lvl][0] with [/*memSize =*/4, 0, pLo, pHi]
- updateSlicePosTupleNum(builder, loc, c1, sPtrBuf);
+ // Fills out pIdxBuffer[tid][lvl][0] with [0, pLo, pHi]
updateSlicePosPtr(builder, loc, sPtrBuf, c0);
updateSlicePos(builder, loc, sPtrBuf, pLo, c0, SlicePosKind::kLo);
updateSlicePos(builder, loc, sPtrBuf, pHi, c0, SlicePosKind::kHi);
-
+  // Slicing over a resolved parent, we only need one pair of lo and hi
+  // positions to specify the current slice.
+ Value tupleNum = c1;
// This is an non empty tensor if pLo < pHi.
Value isNonEmpty = CMPI(ult, pLo, pHi);
// The minimal coord must be at the first on ordered level.
@@ -1941,7 +1934,7 @@ void LoopEmitter::genResolvedSliceBegin(OpBuilder &builder, Location loc,
// FIXME: We need the relative offset related to the base slice.
Value absOffset = offsetFromMinCoord(builder, loc, minCrd, nxSz, isNonEmpty);
- sliceStack[tid].emplace_back(minCrd, absOffset, isNonEmpty, lvl,
+ sliceStack[tid].emplace_back(minCrd, absOffset, isNonEmpty, tupleNum, lvl,
/*depth=*/1);
}
@@ -1973,8 +1966,8 @@ void LoopEmitter::genUnResolvedSliceBegin(OpBuilder &builder, Location loc,
Value remSz = sliceMeta[tid][lvl][depth + 1].first;
// Dense slice begin is trivial
if (isDenseLT(lvlTypes[tid][lvl])) {
- sliceStack[tid].emplace_back(c0, c0, constantI1(builder, loc, false), lvl,
- depth + 1);
+ sliceStack[tid].emplace_back(c0, c0, constantI1(builder, loc, false), c0,
+ lvl, depth + 1);
return;
}
@@ -2064,11 +2057,11 @@ void LoopEmitter::genUnResolvedSliceBegin(OpBuilder &builder, Location loc,
Value minCrd = result[1];
// Two metadata [memSize, idx].
// TODO: Can use an SSA value for these two metadata
- updateSlicePosTupleNum(builder, loc, result[2], sPtrBuf);
updateSlicePosPtr(builder, loc, sPtrBuf, c0);
// FIXME: we need the relative offset related to the base slice.
Value absOffset = offsetFromMinCoord(builder, loc, minCrd, remSz, isNonEmpty);
- sliceStack[tid].emplace_back(minCrd, absOffset, isNonEmpty, lvl, depth + 1);
+ sliceStack[tid].emplace_back(minCrd, absOffset, isNonEmpty, result[2], lvl,
+ depth + 1);
}
bool LoopEmitter::genSliceBegin(OpBuilder &builder, Location loc, TensorId tid,
@@ -2212,10 +2205,10 @@ LoopEmitter::genSliceNextInduction(OpBuilder &builder, Location loc,
// offset = minCrd - size + 1;
// }
builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
- reduc[2] = absOffset; // restore value.
- Value mSz = loadSlicePosTupleNum(builder, loc, sPtrBuf); // memSize
- reduc[0] = lvlSizes[tid][lvl]; // next min coord
- reduc[1] = constantI1(builder, loc, false); // isNonEmpty
+ reduc[2] = absOffset; // restore value.
+ Value mSz = info.posTupleNum; // tuple number.
+ reduc[0] = lvlSizes[tid][lvl]; // next min coord
+ reduc[1] = constantI1(builder, loc, false); // isNonEmpty
auto loopArgs = static_cast<ValueRange>(reduc).drop_back();
auto forOp = scf::buildLoopNest(
builder, loc, c0, mSz, c1, loopArgs,
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
index 0bdd9d45e6ae6..5e51cb2110fa1 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
@@ -298,10 +298,10 @@ class LoopEmitter {
struct SliceInfo final {
// Note that we do not need to create a actual sparse tensor slice but
// instead only need to maintain the metadata of the slice.
- SliceInfo(Value minCrd, Value offset, Value isNonEmpty,
+ SliceInfo(Value minCrd, Value offset, Value isNonEmpty, Value posTupleNum,
std::optional<Level> slicedOnLvl, unsigned depth)
: minCrd(minCrd), offset(offset), isNonEmpty(isNonEmpty),
- slicedOnLvl(slicedOnLvl), depth(depth) {
+ posTupleNum(posTupleNum), slicedOnLvl(slicedOnLvl), depth(depth) {
// TODO: use std::optional<pair<Level, minCrd>>
assert(!slicedOnLvl || minCrd);
}
@@ -309,9 +309,10 @@ class LoopEmitter {
// Whether this is the tensor that has not yet been sliced.
bool isInitialTensor() const { return !slicedOnLvl.has_value(); }
- Value minCrd; // the minimum coordinate of the slice.
- Value offset; // the *absolute* offset of the current slice.
- Value isNonEmpty; // whether the slice is empty.
+ Value minCrd; // the minimum coordinate of the slice.
+ Value offset; // the *absolute* offset of the current slice.
+ Value isNonEmpty; // whether the slice is empty.
+ Value posTupleNum; // The number of position tuples used in the slice.
std::optional<Level> slicedOnLvl; // the level on which the slice is done
unsigned depth; // the depth (relative to dependentDimMap[tid][lvl]).
};
@@ -650,17 +651,6 @@ class LoopEmitter {
std::vector<std::vector<LevelType>> lvlTypes;
// Sparse iteration information for each `(TensorId, Level)` pair.
// These arrays are updated to remain current within the current loop.
- // TODO: Clarify which of these are indexed by dstLvl vs srcLvl.
- //
- /// The collection of positions for a given element (one such collection
- /// for each tensor). This is the position analogue of the "coords"
- /// naming convention.
- ///
- /// FIXME: [CLARIFY_POSITS_LVL] It's unclear which levels are used
- /// to index the `posits` array. On the one hand `genSparseCrd`
- /// uses dstLvl; on the other hand `enterLoopOverTensorAtLvl`,
- /// `prepareLoopOverTensorAtLvl`, and `enterCoIterationOverTensorsAtLvls`
- /// uses srcLvl. So which is it?
std::vector<std::vector<Value>> posits;
/// The collection of coordinates for a given element (one such
/// collection for each tensor).
@@ -704,10 +694,6 @@ class LoopEmitter {
// sliceStack[tid] holds the generated slice stack on tid.
std::vector<std::vector<SliceInfo>> sliceStack;
- /// TODO: not yet used, it should track the current level for each tensor
- /// to help eliminate `lvls` paramters from above APIs.
- /// std::vector<Level> curLvl;
-
//
// Fields which have at most `numLoops` many entries.
//
diff --git a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
index 0f99a0206e4cb..02cc5d1e2ef34 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
@@ -8,8 +8,8 @@
// CHECK-LABEL: func.func @conv2d_all_sparse_CSR(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xi32, #sparse{{[0-9]*}}>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<3x3xi32>) -> tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xi32, #sparse>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<3x3xi32>) -> tensor<6x6xi32, #sparse> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant -2 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 4 : index
@@ -21,250 +21,232 @@
// CHECK-DAG: %[[VAL_10:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_11:.*]] = arith.constant 0 : i32
// CHECK-DAG: %[[VAL_12:.*]] = arith.constant false
-// CHECK-DAG: %[[VAL_13:.*]] = tensor.empty() : tensor<6x6xi32, #sparse{{[0-9]*}}>
-// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xindex>
-// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xindex>
-// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xindex>
-// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xindex>
-// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xi32, #sparse{{[0-9]*}}> to memref<?xi32>
-// CHECK-DAG: %[[VAL_19:.*]] = memref.alloca() : memref<11xindex>
-// CHECK-DAG: %[[VAL_20:.*]] = memref.alloca() : memref<5xindex>
-// CHECK-DAG: %[[VAL_21:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_7]]] : memref<?xindex>
-// CHECK-DAG: memref.store %[[VAL_7]], %[[VAL_20]]{{\[}}%[[VAL_10]]] : memref<5xindex>
-// CHECK-DAG: memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_7]]] : memref<5xindex>
-// CHECK-DAG: memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_9]]] : memref<5xindex>
-// CHECK-DAG: memref.store %[[VAL_21]], %[[VAL_20]]{{\[}}%[[VAL_6]]] : memref<5xindex>
+// CHECK-DAG: %[[VAL_13:.*]] = tensor.empty() : tensor<6x6xi32, #sparse>
+// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse> to memref<?xindex>
+// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse> to memref<?xindex>
+// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse> to memref<?xindex>
+// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse> to memref<?xindex>
+// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xi32, #sparse> to memref<?xi32>
+// CHECK: %[[VAL_19:.*]] = memref.alloca() : memref<11xindex>
+// CHECK: %[[VAL_20:.*]] = memref.alloca() : memref<5xindex>
+// CHECK: %[[VAL_21:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_7]]] : memref<?xindex>
+// CHECK: memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_7]]] : memref<5xindex>
+// CHECK: memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_9]]] : memref<5xindex>
+// CHECK: memref.store %[[VAL_21]], %[[VAL_20]]{{\[}}%[[VAL_6]]] : memref<5xindex>
// CHECK: %[[VAL_22:.*]] = arith.cmpi ugt, %[[VAL_21]], %[[VAL_10]] : index
// CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK: %[[VAL_24:.*]] = arith.cmpi uge, %[[VAL_23]], %[[VAL_6]] : index
// CHECK: %[[VAL_25:.*]] = arith.andi %[[VAL_22]], %[[VAL_24]] : i1
// CHECK: %[[VAL_26:.*]] = arith.addi %[[VAL_23]], %[[VAL_3]] : index
// CHECK: %[[VAL_27:.*]] = arith.select %[[VAL_25]], %[[VAL_26]], %[[VAL_10]] : index
-// CHECK: %[[VAL_28:.*]]:3 = scf.while (%[[VAL_29:.*]] = %[[VAL_22]], %[[VAL_30:.*]] = %[[VAL_23]], %[[VAL_31:.*]] = %[[VAL_27]], %[[VAL_32:.*]] = %[[VAL_13]]) : (i1, index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>) -> (index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>) {
-// CHECK: scf.condition(%[[VAL_29]]) %[[VAL_30]], %[[VAL_31]], %[[VAL_32]] : index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK: %[[VAL_28:.*]]:3 = scf.while (%[[VAL_29:.*]] = %[[VAL_22]], %[[VAL_30:.*]] = %[[VAL_23]], %[[VAL_31:.*]] = %[[VAL_27]], %[[VAL_32:.*]] = %[[VAL_13]]) : (i1, index, index, tensor<6x6xi32, #sparse>) -> (index, index, tensor<6x6xi32, #sparse>) {
+// CHECK: scf.condition(%[[VAL_29]]) %[[VAL_30]], %[[VAL_31]], %[[VAL_32]] : index, index, tensor<6x6xi32, #sparse>
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: tensor<6x6xi32, #sparse{{[0-9]*}}>):
-// CHECK: %[[VAL_36:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_10]]] : memref<5xindex>
-// CHECK: %[[VAL_37:.*]]:4 = scf.for %[[VAL_38:.*]] = %[[VAL_10]] to %[[VAL_36]] step %[[VAL_7]] iter_args(%[[VAL_39:.*]] = %[[VAL_12]], %[[VAL_40:.*]] = %[[VAL_5]], %[[VAL_41:.*]] = %[[VAL_10]], %[[VAL_42:.*]] = %[[VAL_10]]) -> (i1, index, index, index) {
-// CHECK: %[[VAL_43:.*]] = arith.addi %[[VAL_38]], %[[VAL_9]] : index
-// CHECK: %[[VAL_44:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_43]]] : memref<5xindex>
-// CHECK: %[[VAL_45:.*]] = arith.addi %[[VAL_38]], %[[VAL_6]] : index
-// CHECK: %[[VAL_46:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_45]]] : memref<5xindex>
-// CHECK: %[[VAL_47:.*]] = arith.addi %[[VAL_38]], %[[VAL_4]] : index
-// CHECK: memref.store %[[VAL_42]], %[[VAL_20]]{{\[}}%[[VAL_47]]] : memref<5xindex>
-// CHECK: %[[VAL_48:.*]] = arith.addi %[[VAL_34]], %[[VAL_6]] : index
-// CHECK: %[[VAL_49:.*]]:5 = scf.while (%[[VAL_50:.*]] = %[[VAL_44]], %[[VAL_51:.*]] = %[[VAL_39]], %[[VAL_52:.*]] = %[[VAL_40]], %[[VAL_53:.*]] = %[[VAL_41]], %[[VAL_54:.*]] = %[[VAL_42]]) : (index, i1, index, index, index) -> (index, i1, index, index, index) {
-// CHECK: %[[VAL_55:.*]] = arith.cmpi ult, %[[VAL_50]], %[[VAL_46]] : index
-// CHECK: %[[VAL_56:.*]] = scf.if %[[VAL_55]] -> (i1) {
-// CHECK: %[[VAL_57:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_50]]] : memref<?xindex>
-// CHECK: %[[VAL_58:.*]] = arith.cmpi ult, %[[VAL_57]], %[[VAL_48]] : index
-// CHECK: scf.yield %[[VAL_58]] : i1
-// CHECK: } else {
-// CHECK: scf.yield %[[VAL_12]] : i1
-// CHECK: }
-// CHECK: scf.condition(%[[VAL_56]]) %[[VAL_50]], %[[VAL_51]], %[[VAL_52]], %[[VAL_53]], %[[VAL_54]] : index, i1, index, index, index
-// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_59:.*]]: index, %[[VAL_60:.*]]: i1, %[[VAL_61:.*]]: index, %[[VAL_62:.*]]: index, %[[VAL_63:.*]]: index):
-// CHECK: %[[VAL_64:.*]] = arith.addi %[[VAL_59]], %[[VAL_7]] : index
-// CHECK: %[[VAL_65:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_59]]] : memref<?xindex>
-// CHECK: %[[VAL_66:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_64]]] : memref<?xindex>
-// CHECK: %[[VAL_67:.*]] = arith.cmpi ult, %[[VAL_65]], %[[VAL_66]] : index
-// CHECK: %[[VAL_68:.*]] = arith.ori %[[VAL_67]], %[[VAL_60]] : i1
-// CHECK: %[[VAL_69:.*]] = scf.if %[[VAL_67]] -> (index) {
-// CHECK: %[[VAL_70:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_65]]] : memref<?xindex>
-// CHECK: %[[VAL_71:.*]] = arith.cmpi ult, %[[VAL_70]], %[[VAL_61]] : index
-// CHECK: %[[VAL_72:.*]] = arith.select %[[VAL_71]], %[[VAL_70]], %[[VAL_61]] : index
-// CHECK: scf.yield %[[VAL_72]] : index
-// CHECK: } else {
-// CHECK: scf.yield %[[VAL_61]] : index
-// CHECK: }
-// CHECK: %[[VAL_73:.*]] = arith.addi %[[VAL_62]], %[[VAL_9]] : index
-// CHECK: memref.store %[[VAL_65]], %[[VAL_19]]{{\[}}%[[VAL_73]]] : memref<11xindex>
-// CHECK: %[[VAL_74:.*]] = arith.addi %[[VAL_62]], %[[VAL_8]] : index
-// CHECK: memref.store %[[VAL_66]], %[[VAL_19]]{{\[}}%[[VAL_74]]] : memref<11xindex>
-// CHECK: %[[VAL_75:.*]] = arith.addi %[[VAL_62]], %[[VAL_7]] : index
-// CHECK: %[[VAL_76:.*]] = arith.addi %[[VAL_63]], %[[VAL_7]] : index
-// CHECK: scf.yield %[[VAL_64]], %[[VAL_68]], %[[VAL_69]], %[[VAL_75]], %[[VAL_76]] : index, i1, index, index, index
+// CHECK: ^bb0(%[[VAL_33:.*]]: index, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: tensor<6x6xi32, #sparse>):
+// CHECK: %[[VAL_36:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_9]]] : memref<5xindex>
+// CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_6]]] : memref<5xindex>
+// CHECK: memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_4]]] : memref<5xindex>
+// CHECK: %[[VAL_38:.*]] = arith.addi %[[VAL_34]], %[[VAL_6]] : index
+// CHECK: %[[VAL_39:.*]]:5 = scf.while (%[[VAL_40:.*]] = %[[VAL_36]], %[[VAL_41:.*]] = %[[VAL_12]], %[[VAL_42:.*]] = %[[VAL_5]], %[[VAL_43:.*]] = %[[VAL_10]], %[[VAL_44:.*]] = %[[VAL_10]]) : (index, i1, index, index, index) -> (index, i1, index, index, index) {
+// CHECK: %[[VAL_45:.*]] = arith.cmpi ult, %[[VAL_40]], %[[VAL_37]] : index
+// CHECK: %[[VAL_46:.*]] = scf.if %[[VAL_45]] -> (i1) {
+// CHECK: %[[VAL_47:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_40]]] : memref<?xindex>
+// CHECK: %[[VAL_48:.*]] = arith.cmpi ult, %[[VAL_47]], %[[VAL_38]] : index
+// CHECK: scf.yield %[[VAL_48]] : i1
+// CHECK: } else {
+// CHECK: scf.yield %[[VAL_12]] : i1
// CHECK: }
-// CHECK: scf.yield %[[VAL_77:.*]]#1, %[[VAL_77]]#2, %[[VAL_77]]#3, %[[VAL_77]]#4 : i1, index, index, index
+// CHECK: scf.condition(%[[VAL_46]]) %[[VAL_40]], %[[VAL_41]], %[[VAL_42]], %[[VAL_43]], %[[VAL_44]] : index, i1, index, index, index
+// CHECK: } do {
+// CHECK: ^bb0(%[[VAL_49:.*]]: index, %[[VAL_50:.*]]: i1, %[[VAL_51:.*]]: index, %[[VAL_52:.*]]: index, %[[VAL_53:.*]]: index):
+// CHECK: %[[VAL_54:.*]] = arith.addi %[[VAL_49]], %[[VAL_7]] : index
+// CHECK: %[[VAL_55:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_49]]] : memref<?xindex>
+// CHECK: %[[VAL_56:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_54]]] : memref<?xindex>
+// CHECK: %[[VAL_57:.*]] = arith.cmpi ult, %[[VAL_55]], %[[VAL_56]] : index
+// CHECK: %[[VAL_58:.*]] = arith.ori %[[VAL_57]], %[[VAL_50]] : i1
+// CHECK: %[[VAL_59:.*]] = scf.if %[[VAL_57]] -> (index) {
+// CHECK: %[[VAL_60:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_55]]] : memref<?xindex>
+// CHECK: %[[VAL_61:.*]] = arith.cmpi ult, %[[VAL_60]], %[[VAL_51]] : index
+// CHECK: %[[VAL_62:.*]] = arith.select %[[VAL_61]], %[[VAL_60]], %[[VAL_51]] : index
+// CHECK: scf.yield %[[VAL_62]] : index
+// CHECK: } else {
+// CHECK: scf.yield %[[VAL_51]] : index
+// CHECK: }
+// CHECK: %[[VAL_63:.*]] = arith.addi %[[VAL_52]], %[[VAL_9]] : index
+// CHECK: memref.store %[[VAL_55]], %[[VAL_19]]{{\[}}%[[VAL_63]]] : memref<11xindex>
+// CHECK: %[[VAL_64:.*]] = arith.addi %[[VAL_52]], %[[VAL_8]] : index
+// CHECK: memref.store %[[VAL_56]], %[[VAL_19]]{{\[}}%[[VAL_64]]] : memref<11xindex>
+// CHECK: %[[VAL_65:.*]] = arith.addi %[[VAL_52]], %[[VAL_7]] : index
+// CHECK: %[[VAL_66:.*]] = arith.addi %[[VAL_53]], %[[VAL_7]] : index
+// CHECK: scf.yield %[[VAL_54]], %[[VAL_58]], %[[VAL_59]], %[[VAL_65]], %[[VAL_66]] : index, i1, index, index, index
// CHECK: }
-// CHECK: memref.store %[[VAL_78:.*]]#2, %[[VAL_19]]{{\[}}%[[VAL_10]]] : memref<11xindex>
// CHECK: memref.store %[[VAL_10]], %[[VAL_19]]{{\[}}%[[VAL_7]]] : memref<11xindex>
-// CHECK: %[[VAL_79:.*]] = arith.cmpi uge, %[[VAL_78]]#1, %[[VAL_6]] : index
-// CHECK: %[[VAL_80:.*]] = arith.andi %[[VAL_78]]#0, %[[VAL_79]] : i1
-// CHECK: %[[VAL_81:.*]] = arith.addi %[[VAL_78]]#1, %[[VAL_3]] : index
-// CHECK: %[[VAL_82:.*]] = arith.select %[[VAL_80]], %[[VAL_81]], %[[VAL_10]] : index
-// CHECK: %[[VAL_83:.*]]:3 = scf.while (%[[VAL_84:.*]] = %[[VAL_78]]#0, %[[VAL_85:.*]] = %[[VAL_78]]#1, %[[VAL_86:.*]] = %[[VAL_82]], %[[VAL_87:.*]] = %[[VAL_35]]) : (i1, index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>) -> (index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>) {
-// CHECK: scf.condition(%[[VAL_84]]) %[[VAL_85]], %[[VAL_86]], %[[VAL_87]] : index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK: %[[VAL_67:.*]] = arith.cmpi uge, %[[VAL_68:.*]]#2, %[[VAL_6]] : index
+// CHECK: %[[VAL_69:.*]] = arith.andi %[[VAL_68]]#1, %[[VAL_67]] : i1
+// CHECK: %[[VAL_70:.*]] = arith.addi %[[VAL_68]]#2, %[[VAL_3]] : index
+// CHECK: %[[VAL_71:.*]] = arith.select %[[VAL_69]], %[[VAL_70]], %[[VAL_10]] : index
+// CHECK: %[[VAL_72:.*]]:3 = scf.while (%[[VAL_73:.*]] = %[[VAL_68]]#1, %[[VAL_74:.*]] = %[[VAL_68]]#2, %[[VAL_75:.*]] = %[[VAL_71]], %[[VAL_76:.*]] = %[[VAL_35]]) : (i1, index, index, tensor<6x6xi32, #sparse>) -> (index, index, tensor<6x6xi32, #sparse>) {
+// CHECK: scf.condition(%[[VAL_73]]) %[[VAL_74]], %[[VAL_75]], %[[VAL_76]] : index, index, tensor<6x6xi32, #sparse>
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_88:.*]]: index, %[[VAL_89:.*]]: index, %[[VAL_90:.*]]: tensor<6x6xi32, #sparse{{[0-9]*}}>):
-// CHECK: %[[VAL_91:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_7]]] : memref<5xindex>
-// CHECK: %[[VAL_92:.*]] = arith.addi %[[VAL_91]], %[[VAL_9]] : index
-// CHECK: %[[VAL_93:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_92]]] : memref<5xindex>
-// CHECK: %[[VAL_94:.*]] = arith.addi %[[VAL_91]], %[[VAL_6]] : index
-// CHECK: %[[VAL_95:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_94]]] : memref<5xindex>
-// CHECK: %[[VAL_96:.*]] = arith.addi %[[VAL_34]], %[[VAL_6]] : index
-// CHECK: %[[VAL_97:.*]]:3 = scf.while (%[[VAL_98:.*]] = %[[VAL_93]], %[[VAL_99:.*]] = %[[VAL_11]], %[[VAL_100:.*]] = %[[VAL_12]]) : (index, i32, i1) -> (index, i32, i1) {
-// CHECK: %[[VAL_101:.*]] = arith.cmpi ult, %[[VAL_98]], %[[VAL_95]] : index
-// CHECK: %[[VAL_102:.*]] = scf.if %[[VAL_101]] -> (i1) {
-// CHECK: %[[VAL_103:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_98]]] : memref<?xindex>
-// CHECK: %[[VAL_104:.*]] = arith.cmpi ult, %[[VAL_103]], %[[VAL_96]] : index
-// CHECK: scf.yield %[[VAL_104]] : i1
+// CHECK: ^bb0(%[[VAL_77:.*]]: index, %[[VAL_78:.*]]: index, %[[VAL_79:.*]]: tensor<6x6xi32, #sparse>):
+// CHECK: %[[VAL_80:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_7]]] : memref<5xindex>
+// CHECK: %[[VAL_81:.*]] = arith.addi %[[VAL_80]], %[[VAL_9]] : index
+// CHECK: %[[VAL_82:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_81]]] : memref<5xindex>
+// CHECK: %[[VAL_83:.*]] = arith.addi %[[VAL_80]], %[[VAL_6]] : index
+// CHECK: %[[VAL_84:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_83]]] : memref<5xindex>
+// CHECK: %[[VAL_85:.*]]:3 = scf.while (%[[VAL_86:.*]] = %[[VAL_82]], %[[VAL_87:.*]] = %[[VAL_11]], %[[VAL_88:.*]] = %[[VAL_12]]) : (index, i32, i1) -> (index, i32, i1) {
+// CHECK: %[[VAL_89:.*]] = arith.cmpi ult, %[[VAL_86]], %[[VAL_84]] : index
+// CHECK: %[[VAL_90:.*]] = scf.if %[[VAL_89]] -> (i1) {
+// CHECK: %[[VAL_91:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_86]]] : memref<?xindex>
+// CHECK: %[[VAL_92:.*]] = arith.cmpi ult, %[[VAL_91]], %[[VAL_38]] : index
+// CHECK: scf.yield %[[VAL_92]] : i1
// CHECK: } else {
// CHECK: scf.yield %[[VAL_12]] : i1
// CHECK: }
-// CHECK: scf.condition(%[[VAL_102]]) %[[VAL_98]], %[[VAL_99]], %[[VAL_100]] : index, i32, i1
+// CHECK: scf.condition(%[[VAL_90]]) %[[VAL_86]], %[[VAL_87]], %[[VAL_88]] : index, i32, i1
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_105:.*]]: index, %[[VAL_106:.*]]: i32, %[[VAL_107:.*]]: i1):
-// CHECK: %[[VAL_108:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_105]]] : memref<?xindex>
-// CHECK: %[[VAL_109:.*]] = arith.subi %[[VAL_108]], %[[VAL_34]] : index
-// CHECK: %[[VAL_110:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_7]]] : memref<11xindex>
-// CHECK: %[[VAL_111:.*]] = arith.addi %[[VAL_110]], %[[VAL_9]] : index
-// CHECK: %[[VAL_112:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_111]]] : memref<11xindex>
-// CHECK: %[[VAL_113:.*]] = arith.addi %[[VAL_110]], %[[VAL_8]] : index
-// CHECK: %[[VAL_114:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_113]]] : memref<11xindex>
-// CHECK: %[[VAL_115:.*]] = arith.addi %[[VAL_89]], %[[VAL_6]] : index
-// CHECK: %[[VAL_116:.*]]:2 = scf.while (%[[VAL_117:.*]] = %[[VAL_112]], %[[VAL_118:.*]] = %[[VAL_106]]) : (index, i32) -> (index, i32) {
-// CHECK: %[[VAL_119:.*]] = arith.cmpi ult, %[[VAL_117]], %[[VAL_114]] : index
-// CHECK: %[[VAL_120:.*]] = scf.if %[[VAL_119]] -> (i1) {
-// CHECK: %[[VAL_121:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_117]]] : memref<?xindex>
-// CHECK: %[[VAL_122:.*]] = arith.cmpi ult, %[[VAL_121]], %[[VAL_115]] : index
-// CHECK: scf.yield %[[VAL_122]] : i1
+// CHECK: ^bb0(%[[VAL_93:.*]]: index, %[[VAL_94:.*]]: i32, %[[VAL_95:.*]]: i1):
+// CHECK: %[[VAL_96:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_93]]] : memref<?xindex>
+// CHECK: %[[VAL_97:.*]] = arith.subi %[[VAL_96]], %[[VAL_34]] : index
+// CHECK: %[[VAL_98:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_7]]] : memref<11xindex>
+// CHECK: %[[VAL_99:.*]] = arith.addi %[[VAL_98]], %[[VAL_9]] : index
+// CHECK: %[[VAL_100:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_99]]] : memref<11xindex>
+// CHECK: %[[VAL_101:.*]] = arith.addi %[[VAL_98]], %[[VAL_8]] : index
+// CHECK: %[[VAL_102:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_101]]] : memref<11xindex>
+// CHECK: %[[VAL_103:.*]] = arith.addi %[[VAL_78]], %[[VAL_6]] : index
+// CHECK: %[[VAL_104:.*]]:2 = scf.while (%[[VAL_105:.*]] = %[[VAL_100]], %[[VAL_106:.*]] = %[[VAL_94]]) : (index, i32) -> (index, i32) {
+// CHECK: %[[VAL_107:.*]] = arith.cmpi ult, %[[VAL_105]], %[[VAL_102]] : index
+// CHECK: %[[VAL_108:.*]] = scf.if %[[VAL_107]] -> (i1) {
+// CHECK: %[[VAL_109:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_105]]] : memref<?xindex>
+// CHECK: %[[VAL_110:.*]] = arith.cmpi ult, %[[VAL_109]], %[[VAL_103]] : index
+// CHECK: scf.yield %[[VAL_110]] : i1
// CHECK: } else {
// CHECK: scf.yield %[[VAL_12]] : i1
// CHECK: }
-// CHECK: scf.condition(%[[VAL_120]]) %[[VAL_117]], %[[VAL_118]] : index, i32
+// CHECK: scf.condition(%[[VAL_108]]) %[[VAL_105]], %[[VAL_106]] : index, i32
// CHECK: } do {
-// CHECK: ^bb0(%[[VAL_123:.*]]: index, %[[VAL_124:.*]]: i32):
-// CHECK: %[[VAL_125:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_123]]] : memref<?xindex>
-// CHECK: %[[VAL_126:.*]] = arith.subi %[[VAL_125]], %[[VAL_89]] : index
-// CHECK: %[[VAL_127:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_123]]] : memref<?xi32>
-// CHECK: %[[VAL_128:.*]] = tensor.extract %[[VAL_1]]{{\[}}%[[VAL_109]], %[[VAL_126]]] : tensor<3x3xi32>
-// CHECK: %[[VAL_129:.*]] = arith.muli %[[VAL_127]], %[[VAL_128]] : i32
-// CHECK: %[[VAL_130:.*]] = arith.addi %[[VAL_124]], %[[VAL_129]] : i32
-// CHECK: %[[VAL_131:.*]] = arith.addi %[[VAL_123]], %[[VAL_7]] : index
-// CHECK: scf.yield %[[VAL_131]], %[[VAL_130]] : index, i32
+// CHECK: ^bb0(%[[VAL_111:.*]]: index, %[[VAL_112:.*]]: i32):
+// CHECK: %[[VAL_113:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_111]]] : memref<?xindex>
+// CHECK: %[[VAL_114:.*]] = arith.subi %[[VAL_113]], %[[VAL_78]] : index
+// CHECK: %[[VAL_115:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_111]]] : memref<?xi32>
+// CHECK: %[[VAL_116:.*]] = tensor.extract %[[VAL_1]]{{\[}}%[[VAL_97]], %[[VAL_114]]] : tensor<3x3xi32>
+// CHECK: %[[VAL_117:.*]] = arith.muli %[[VAL_115]], %[[VAL_116]] : i32
+// CHECK: %[[VAL_118:.*]] = arith.addi %[[VAL_112]], %[[VAL_117]] : i32
+// CHECK: %[[VAL_119:.*]] = arith.addi %[[VAL_111]], %[[VAL_7]] : index
+// CHECK: scf.yield %[[VAL_119]], %[[VAL_118]] : index, i32
// CHECK: }
-// CHECK: %[[VAL_132:.*]] = arith.addi %[[VAL_105]], %[[VAL_7]] : index
-// CHECK: %[[VAL_133:.*]] = arith.addi %[[VAL_110]], %[[VAL_7]] : index
-// CHECK: memref.store %[[VAL_133]], %[[VAL_19]]{{\[}}%[[VAL_7]]] : memref<11xindex>
-// CHECK: scf.yield %[[VAL_132]], %[[VAL_134:.*]]#1, %[[VAL_2]] : index, i32, i1
+// CHECK: %[[VAL_120:.*]] = arith.addi %[[VAL_93]], %[[VAL_7]] : index
+// CHECK: %[[VAL_121:.*]] = arith.addi %[[VAL_98]], %[[VAL_7]] : index
+// CHECK: memref.store %[[VAL_121]], %[[VAL_19]]{{\[}}%[[VAL_7]]] : memref<11xindex>
+// CHECK: scf.yield %[[VAL_120]], %[[VAL_122:.*]]#1, %[[VAL_2]] : index, i32, i1
// CHECK: }
-// CHECK: %[[VAL_135:.*]] = scf.if %[[VAL_136:.*]]#2 -> (tensor<6x6xi32, #sparse{{[0-9]*}}>) {
-// CHECK: %[[VAL_137:.*]] = sparse_tensor.insert %[[VAL_136]]#1 into %[[VAL_90]]{{\[}}%[[VAL_34]], %[[VAL_89]]] : tensor<6x6xi32, #sparse{{[0-9]*}}>
-// CHECK: scf.yield %[[VAL_137]] : tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK: %[[VAL_123:.*]] = scf.if %[[VAL_124:.*]]#2 -> (tensor<6x6xi32, #sparse>) {
+// CHECK: %[[VAL_125:.*]] = sparse_tensor.insert %[[VAL_124]]#1 into %[[VAL_79]]{{\[}}%[[VAL_34]], %[[VAL_78]]] : tensor<6x6xi32, #sparse>
+// CHECK: scf.yield %[[VAL_125]] : tensor<6x6xi32, #sparse>
// CHECK: } else {
-// CHECK: scf.yield %[[VAL_90]] : tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK: scf.yield %[[VAL_79]] : tensor<6x6xi32, #sparse>
// CHECK: }
// CHECK: memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_7]]] : memref<5xindex>
// CHECK: memref.store %[[VAL_10]], %[[VAL_19]]{{\[}}%[[VAL_7]]] : memref<11xindex>
-// CHECK: %[[VAL_138:.*]] = arith.cmpi ugt, %[[VAL_88]], %[[VAL_89]] : index
-// CHECK: %[[VAL_139:.*]]:3 = scf.if %[[VAL_138]] -> (index, i1, index) {
-// CHECK: %[[VAL_140:.*]] = arith.addi %[[VAL_89]], %[[VAL_7]] : index
-// CHECK: scf.yield %[[VAL_88]], %[[VAL_2]], %[[VAL_140]] : index, i1, index
+// CHECK: %[[VAL_126:.*]] = arith.cmpi ugt, %[[VAL_77]], %[[VAL_78]] : index
+// CHECK: %[[VAL_127:.*]]:3 = scf.if %[[VAL_126]] -> (index, i1, index) {
+// CHECK: %[[VAL_128:.*]] = arith.addi %[[VAL_78]], %[[VAL_7]] : index
+// CHECK: scf.yield %[[VAL_77]], %[[VAL_2]], %[[VAL_128]] : index, i1, index
// CHECK: } else {
-// CHECK: %[[VAL_141:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_10]]] : memref<11xindex>
-// CHECK: %[[VAL_142:.*]]:2 = scf.for %[[VAL_143:.*]] = %[[VAL_10]] to %[[VAL_141]] step %[[VAL_7]] iter_args(%[[VAL_144:.*]] = %[[VAL_5]], %[[VAL_145:.*]] = %[[VAL_12]]) -> (index, i1) {
-// CHECK: %[[VAL_146:.*]] = arith.addi %[[VAL_143]], %[[VAL_9]] : index
-// CHECK: %[[VAL_147:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_146]]] : memref<11xindex>
-// CHECK: %[[VAL_148:.*]] = arith.addi %[[VAL_143]], %[[VAL_8]] : index
-// CHECK: %[[VAL_149:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_148]]] : memref<11xindex>
-// CHECK: %[[VAL_150:.*]] = arith.cmpi ult, %[[VAL_147]], %[[VAL_149]] : index
-// CHECK: %[[VAL_151:.*]] = scf.if %[[VAL_150]] -> (index) {
-// CHECK: %[[VAL_152:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_147]]] : memref<?xindex>
-// CHECK: %[[VAL_153:.*]] = arith.cmpi eq, %[[VAL_152]], %[[VAL_88]] : index
-// CHECK: %[[VAL_154:.*]] = scf.if %[[VAL_153]] -> (index) {
-// CHECK: %[[VAL_155:.*]] = arith.addi %[[VAL_147]], %[[VAL_7]] : index
-// CHECK: memref.store %[[VAL_155]], %[[VAL_19]]{{\[}}%[[VAL_146]]] : memref<11xindex>
-// CHECK: scf.yield %[[VAL_155]] : index
+// CHECK: %[[VAL_129:.*]]:2 = scf.for %[[VAL_130:.*]] = %[[VAL_10]] to %[[VAL_68]]#3 step %[[VAL_7]] iter_args(%[[VAL_131:.*]] = %[[VAL_5]], %[[VAL_132:.*]] = %[[VAL_12]]) -> (index, i1) {
+// CHECK: %[[VAL_133:.*]] = arith.addi %[[VAL_130]], %[[VAL_9]] : index
+// CHECK: %[[VAL_134:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_133]]] : memref<11xindex>
+// CHECK: %[[VAL_135:.*]] = arith.addi %[[VAL_130]], %[[VAL_8]] : index
+// CHECK: %[[VAL_136:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_135]]] : memref<11xindex>
+// CHECK: %[[VAL_137:.*]] = arith.cmpi ult, %[[VAL_134]], %[[VAL_136]] : index
+// CHECK: %[[VAL_138:.*]] = scf.if %[[VAL_137]] -> (index) {
+// CHECK: %[[VAL_139:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_134]]] : memref<?xindex>
+// CHECK: %[[VAL_140:.*]] = arith.cmpi eq, %[[VAL_139]], %[[VAL_77]] : index
+// CHECK: %[[VAL_141:.*]] = scf.if %[[VAL_140]] -> (index) {
+// CHECK: %[[VAL_142:.*]] = arith.addi %[[VAL_134]], %[[VAL_7]] : index
+// CHECK: memref.store %[[VAL_142]], %[[VAL_19]]{{\[}}%[[VAL_133]]] : memref<11xindex>
+// CHECK: scf.yield %[[VAL_142]] : index
// CHECK: } else {
-// CHECK: scf.yield %[[VAL_147]] : index
+// CHECK: scf.yield %[[VAL_134]] : index
// CHECK: }
-// CHECK: scf.yield %[[VAL_154]] : index
+// CHECK: scf.yield %[[VAL_141]] : index
// CHECK: } else {
-// CHECK: scf.yield %[[VAL_147]] : index
+// CHECK: scf.yield %[[VAL_134]] : index
// CHECK: }
-// CHECK: %[[VAL_156:.*]] = arith.cmpi ult, %[[VAL_151]], %[[VAL_149]] : index
-// CHECK: %[[VAL_157:.*]] = scf.if %[[VAL_156]] -> (index) {
-// CHECK: %[[VAL_158:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_151]]] : memref<?xindex>
-// CHECK: scf.yield %[[VAL_158]] : index
+// CHECK: %[[VAL_143:.*]] = arith.cmpi ult, %[[VAL_138]], %[[VAL_136]] : index
+// CHECK: %[[VAL_144:.*]] = scf.if %[[VAL_143]] -> (index) {
+// CHECK: %[[VAL_145:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_138]]] : memref<?xindex>
+// CHECK: scf.yield %[[VAL_145]] : index
// CHECK: } else {
-// CHECK: scf.yield %[[VAL_144]] : index
+// CHECK: scf.yield %[[VAL_131]] : index
// CHECK: }
-// CHECK: %[[VAL_159:.*]] = arith.ori %[[VAL_156]], %[[VAL_145]] : i1
-// CHECK: %[[VAL_160:.*]] = arith.cmpi ult, %[[VAL_157]], %[[VAL_144]] : index
-// CHECK: %[[VAL_161:.*]] = arith.select %[[VAL_160]], %[[VAL_157]], %[[VAL_144]] : index
-// CHECK: scf.yield %[[VAL_161]], %[[VAL_159]] : index, i1
+// CHECK: %[[VAL_146:.*]] = arith.ori %[[VAL_143]], %[[VAL_132]] : i1
+// CHECK: %[[VAL_147:.*]] = arith.cmpi ult, %[[VAL_144]], %[[VAL_131]] : index
+// CHECK: %[[VAL_148:.*]] = arith.select %[[VAL_147]], %[[VAL_144]], %[[VAL_131]] : index
+// CHECK: scf.yield %[[VAL_148]], %[[VAL_146]] : index, i1
// CHECK: }
-// CHECK: %[[VAL_162:.*]] = arith.addi %[[VAL_163:.*]]#0, %[[VAL_7]] : index
-// CHECK: %[[VAL_164:.*]] = arith.addi %[[VAL_163]]#0, %[[VAL_3]] : index
-// CHECK: %[[VAL_165:.*]] = arith.cmpi uge, %[[VAL_162]], %[[VAL_6]] : index
-// CHECK: %[[VAL_166:.*]] = arith.select %[[VAL_165]], %[[VAL_164]], %[[VAL_10]] : index
-// CHECK: scf.yield %[[VAL_163]]#0, %[[VAL_163]]#1, %[[VAL_166]] : index, i1, index
+// CHECK: %[[VAL_149:.*]] = arith.addi %[[VAL_150:.*]]#0, %[[VAL_7]] : index
+// CHECK: %[[VAL_151:.*]] = arith.addi %[[VAL_150]]#0, %[[VAL_3]] : index
+// CHECK: %[[VAL_152:.*]] = arith.cmpi uge, %[[VAL_149]], %[[VAL_6]] : index
+// CHECK: %[[VAL_153:.*]] = arith.select %[[VAL_152]], %[[VAL_151]], %[[VAL_10]] : index
+// CHECK: scf.yield %[[VAL_150]]#0, %[[VAL_150]]#1, %[[VAL_153]] : index, i1, index
// CHECK: }
-// CHECK: %[[VAL_167:.*]] = arith.addi %[[VAL_89]], %[[VAL_7]] : index
-// CHECK: %[[VAL_168:.*]] = arith.cmpi ugt, %[[VAL_169:.*]]#2, %[[VAL_167]] : index
-// CHECK: %[[VAL_170:.*]] = arith.select %[[VAL_168]], %[[VAL_169]]#2, %[[VAL_167]] : index
-// CHECK: %[[VAL_171:.*]] = arith.addi %[[VAL_170]], %[[VAL_6]] : index
-// CHECK: %[[VAL_172:.*]] = arith.cmpi ule, %[[VAL_171]], %[[VAL_5]] : index
-// CHECK: %[[VAL_173:.*]] = arith.andi %[[VAL_169]]#1, %[[VAL_172]] : i1
-// CHECK: scf.yield %[[VAL_173]], %[[VAL_169]]#0, %[[VAL_170]], %[[VAL_135]] : i1, index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK: %[[VAL_154:.*]] = arith.addi %[[VAL_78]], %[[VAL_7]] : index
+// CHECK: %[[VAL_155:.*]] = arith.cmpi ugt, %[[VAL_156:.*]]#2, %[[VAL_154]] : index
+// CHECK: %[[VAL_157:.*]] = arith.select %[[VAL_155]], %[[VAL_156]]#2, %[[VAL_154]] : index
+// CHECK: %[[VAL_158:.*]] = arith.addi %[[VAL_157]], %[[VAL_6]] : index
+// CHECK: %[[VAL_159:.*]] = arith.cmpi ule, %[[VAL_158]], %[[VAL_5]] : index
+// CHECK: %[[VAL_160:.*]] = arith.andi %[[VAL_156]]#1, %[[VAL_159]] : i1
+// CHECK: scf.yield %[[VAL_160]], %[[VAL_156]]#0, %[[VAL_157]], %[[VAL_123]] : i1, index, index, tensor<6x6xi32, #sparse>
// CHECK: }
// CHECK: memref.store %[[VAL_10]], %[[VAL_20]]{{\[}}%[[VAL_7]]] : memref<5xindex>
-// CHECK: %[[VAL_174:.*]] = arith.cmpi ugt, %[[VAL_33]], %[[VAL_34]] : index
-// CHECK: %[[VAL_175:.*]]:3 = scf.if %[[VAL_174]] -> (index, i1, index) {
-// CHECK: %[[VAL_176:.*]] = arith.addi %[[VAL_34]], %[[VAL_7]] : index
-// CHECK: scf.yield %[[VAL_33]], %[[VAL_2]], %[[VAL_176]] : index, i1, index
+// CHECK: %[[VAL_161:.*]] = arith.cmpi ugt, %[[VAL_33]], %[[VAL_34]] : index
+// CHECK: %[[VAL_162:.*]]:3 = scf.if %[[VAL_161]] -> (index, i1, index) {
+// CHECK: %[[VAL_163:.*]] = arith.addi %[[VAL_34]], %[[VAL_7]] : index
+// CHECK: scf.yield %[[VAL_33]], %[[VAL_2]], %[[VAL_163]] : index, i1, index
// CHECK: } else {
-// CHECK: %[[VAL_177:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_10]]] : memref<5xindex>
-// CHECK: %[[VAL_178:.*]]:2 = scf.for %[[VAL_179:.*]] = %[[VAL_10]] to %[[VAL_177]] step %[[VAL_7]] iter_args(%[[VAL_180:.*]] = %[[VAL_5]], %[[VAL_181:.*]] = %[[VAL_12]]) -> (index, i1) {
-// CHECK: %[[VAL_182:.*]] = arith.addi %[[VAL_179]], %[[VAL_9]] : index
-// CHECK: %[[VAL_183:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_182]]] : memref<5xindex>
-// CHECK: %[[VAL_184:.*]] = arith.addi %[[VAL_179]], %[[VAL_6]] : index
-// CHECK: %[[VAL_185:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_184]]] : memref<5xindex>
-// CHECK: %[[VAL_186:.*]] = arith.cmpi ult, %[[VAL_183]], %[[VAL_185]] : index
-// CHECK: %[[VAL_187:.*]] = scf.if %[[VAL_186]] -> (index) {
-// CHECK: %[[VAL_188:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_183]]] : memref<?xindex>
-// CHECK: %[[VAL_189:.*]] = arith.cmpi eq, %[[VAL_188]], %[[VAL_33]] : index
-// CHECK: %[[VAL_190:.*]] = scf.if %[[VAL_189]] -> (index) {
-// CHECK: %[[VAL_191:.*]] = arith.addi %[[VAL_183]], %[[VAL_7]] : index
-// CHECK: memref.store %[[VAL_191]], %[[VAL_20]]{{\[}}%[[VAL_182]]] : memref<5xindex>
-// CHECK: scf.yield %[[VAL_191]] : index
-// CHECK: } else {
-// CHECK: scf.yield %[[VAL_183]] : index
-// CHECK: }
-// CHECK: scf.yield %[[VAL_190]] : index
-// CHECK: } else {
-// CHECK: scf.yield %[[VAL_183]] : index
-// CHECK: }
-// CHECK: %[[VAL_192:.*]] = arith.cmpi ult, %[[VAL_187]], %[[VAL_185]] : index
-// CHECK: %[[VAL_193:.*]] = scf.if %[[VAL_192]] -> (index) {
-// CHECK: %[[VAL_194:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_187]]] : memref<?xindex>
-// CHECK: scf.yield %[[VAL_194]] : index
+// CHECK: %[[VAL_164:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_9]]] : memref<5xindex>
+// CHECK: %[[VAL_165:.*]] = memref.load %[[VAL_20]]{{\[}}%[[VAL_6]]] : memref<5xindex>
+// CHECK: %[[VAL_166:.*]] = arith.cmpi ult, %[[VAL_164]], %[[VAL_165]] : index
+// CHECK: %[[VAL_167:.*]] = scf.if %[[VAL_166]] -> (index) {
+// CHECK: %[[VAL_168:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_164]]] : memref<?xindex>
+// CHECK: %[[VAL_169:.*]] = arith.cmpi eq, %[[VAL_168]], %[[VAL_33]] : index
+// CHECK: %[[VAL_170:.*]] = scf.if %[[VAL_169]] -> (index) {
+// CHECK: %[[VAL_171:.*]] = arith.addi %[[VAL_164]], %[[VAL_7]] : index
+// CHECK: memref.store %[[VAL_171]], %[[VAL_20]]{{\[}}%[[VAL_9]]] : memref<5xindex>
+// CHECK: scf.yield %[[VAL_171]] : index
// CHECK: } else {
-// CHECK: scf.yield %[[VAL_180]] : index
+// CHECK: scf.yield %[[VAL_164]] : index
// CHECK: }
-// CHECK: %[[VAL_195:.*]] = arith.ori %[[VAL_192]], %[[VAL_181]] : i1
-// CHECK: %[[VAL_196:.*]] = arith.cmpi ult, %[[VAL_193]], %[[VAL_180]] : index
-// CHECK: %[[VAL_197:.*]] = arith.select %[[VAL_196]], %[[VAL_193]], %[[VAL_180]] : index
-// CHECK: scf.yield %[[VAL_197]], %[[VAL_195]] : index, i1
+// CHECK: scf.yield %[[VAL_170]] : index
+// CHECK: } else {
+// CHECK: scf.yield %[[VAL_164]] : index
+// CHECK: }
+// CHECK: %[[VAL_172:.*]] = arith.cmpi ult, %[[VAL_167]], %[[VAL_165]] : index
+// CHECK: %[[VAL_173:.*]] = scf.if %[[VAL_172]] -> (index) {
+// CHECK: %[[VAL_174:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_167]]] : memref<?xindex>
+// CHECK: scf.yield %[[VAL_174]] : index
+// CHECK: } else {
+// CHECK: scf.yield %[[VAL_5]] : index
// CHECK: }
-// CHECK: %[[VAL_198:.*]] = arith.addi %[[VAL_199:.*]]#0, %[[VAL_7]] : index
-// CHECK: %[[VAL_200:.*]] = arith.addi %[[VAL_199]]#0, %[[VAL_3]] : index
-// CHECK: %[[VAL_201:.*]] = arith.cmpi uge, %[[VAL_198]], %[[VAL_6]] : index
-// CHECK: %[[VAL_202:.*]] = arith.select %[[VAL_201]], %[[VAL_200]], %[[VAL_10]] : index
-// CHECK: scf.yield %[[VAL_199]]#0, %[[VAL_199]]#1, %[[VAL_202]] : index, i1, index
+// CHECK: %[[VAL_175:.*]] = arith.cmpi ult, %[[VAL_173]], %[[VAL_5]] : index
+// CHECK: %[[VAL_176:.*]] = arith.select %[[VAL_175]], %[[VAL_173]], %[[VAL_5]] : index
+// CHECK: %[[VAL_177:.*]] = arith.addi %[[VAL_176]], %[[VAL_7]] : index
+// CHECK: %[[VAL_178:.*]] = arith.addi %[[VAL_176]], %[[VAL_3]] : index
+// CHECK: %[[VAL_179:.*]] = arith.cmpi uge, %[[VAL_177]], %[[VAL_6]] : index
+// CHECK: %[[VAL_180:.*]] = arith.select %[[VAL_179]], %[[VAL_178]], %[[VAL_10]] : index
+// CHECK: scf.yield %[[VAL_176]], %[[VAL_172]], %[[VAL_180]] : index, i1, index
// CHECK: }
-// CHECK: %[[VAL_203:.*]] = arith.addi %[[VAL_34]], %[[VAL_7]] : index
-// CHECK: %[[VAL_204:.*]] = arith.cmpi ugt, %[[VAL_205:.*]]#2, %[[VAL_203]] : index
-// CHECK: %[[VAL_206:.*]] = arith.select %[[VAL_204]], %[[VAL_205]]#2, %[[VAL_203]] : index
-// CHECK: %[[VAL_207:.*]] = arith.addi %[[VAL_206]], %[[VAL_6]] : index
-// CHECK: %[[VAL_208:.*]] = arith.cmpi ule, %[[VAL_207]], %[[VAL_5]] : index
-// CHECK: %[[VAL_209:.*]] = arith.andi %[[VAL_205]]#1, %[[VAL_208]] : i1
-// CHECK: scf.yield %[[VAL_209]], %[[VAL_205]]#0, %[[VAL_206]], %[[VAL_210:.*]]#2 : i1, index, index, tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK: %[[VAL_181:.*]] = arith.addi %[[VAL_34]], %[[VAL_7]] : index
+// CHECK: %[[VAL_182:.*]] = arith.cmpi ugt, %[[VAL_183:.*]]#2, %[[VAL_181]] : index
+// CHECK: %[[VAL_184:.*]] = arith.select %[[VAL_182]], %[[VAL_183]]#2, %[[VAL_181]] : index
+// CHECK: %[[VAL_185:.*]] = arith.addi %[[VAL_184]], %[[VAL_6]] : index
+// CHECK: %[[VAL_186:.*]] = arith.cmpi ule, %[[VAL_185]], %[[VAL_5]] : index
+// CHECK: %[[VAL_187:.*]] = arith.andi %[[VAL_183]]#1, %[[VAL_186]] : i1
+// CHECK: scf.yield %[[VAL_187]], %[[VAL_183]]#0, %[[VAL_184]], %[[VAL_188:.*]]#2 : i1, index, index, tensor<6x6xi32, #sparse>
// CHECK: }
-// CHECK: %[[VAL_211:.*]] = sparse_tensor.load %[[VAL_212:.*]]#2 hasInserts : tensor<6x6xi32, #sparse{{[0-9]*}}>
-// CHECK: return %[[VAL_211]] : tensor<6x6xi32, #sparse{{[0-9]*}}>
+// CHECK: %[[VAL_189:.*]] = sparse_tensor.load %[[VAL_190:.*]]#2 hasInserts : tensor<6x6xi32, #sparse>
+// CHECK: return %[[VAL_189]] : tensor<6x6xi32, #sparse>
// CHECK: }
func.func @conv2d_all_sparse_CSR(%arg0: tensor<8x8xi32, #DCSR>,
%arg1: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {