[Mlir-commits] [mlir] 853d704 - [mlir][sparse] moving inbound check for slice driven loop into before block of the WhileOp

Peiming Liu llvmlistbot at llvm.org
Fri Jun 9 10:03:20 PDT 2023


Author: Peiming Liu
Date: 2023-06-09T17:03:15Z
New Revision: 853d704fd0c4744731870df67ade7249d5440438

URL: https://github.com/llvm/llvm-project/commit/853d704fd0c4744731870df67ade7249d5440438
DIFF: https://github.com/llvm/llvm-project/commit/853d704fd0c4744731870df67ade7249d5440438.diff

LOG: [mlir][sparse] moving inbound check for slice driven loop into before block of the WhileOp

This patch changes the while loop generated for iterating over a fully reduced sparse level with an affine index expression: the in-bounds check now lives in the before region of the scf.WhileOp instead of an if inside the loop body.
Before:
```
cont = true
while (cont) {
  if (inBound()) {
    ....
    cont = true;
  } else {
    cont = false;
  }
}
```
After:
```
while(inBound()) {
  ....
}
```
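For illustration, a minimal sketch of the scf.while structure this now produces, with the in-bounds check folded into the before region (SSA names such as %posLo, %posHi, %crdBuf, %crdHi, %true, %false, and %c1 are hypothetical placeholders; see genSparseReducedAffineCond in the diff below for the actual builder code):
```
%r:2 = scf.while (%pos = %posLo, %cont = %true) : (index, i1) -> (index, i1) {
  // Before region: only load the coordinate when the position is in bounds.
  %inBound = arith.cmpi ult, %pos, %posHi : index
  %cond = scf.if %inBound -> (i1) {
    %crd = memref.load %crdBuf[%pos] : memref<?xindex>
    %lt = arith.cmpi ult, %crd, %crdHi : index
    scf.yield %lt : i1
  } else {
    scf.yield %false : i1
  }
  scf.condition(%cond) %pos, %cont : index, i1
} do {
^bb0(%pos: index, %cont: i1):
  // ... loop body ...
  %next = arith.addi %pos, %c1 : index
  scf.yield %next, %true : index, i1
}
```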

Reviewed By: K-Wu

Differential Revision: https://reviews.llvm.org/D152463

Added: 
    

Modified: 
    mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
    mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
index 6a639efb2b337..a8a36950ccd7f 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -1439,25 +1439,42 @@ const LoopEmitter::SliceInfo &LoopEmitter::getMostRecentSliceOnLvl(TensorId tid,
   llvm_unreachable("Failed to find sliceInfo");
 }
 
+static Value genSparseReducedAffineCond(OpBuilder &builder, Location loc,
+                                        Value crdBuf, Value crdHi, Value posit,
+                                        Value posHi, Value cont) {
+  Value inBound = CMPI(ult, posit, posHi);
+  TypeRange types{cont.getType()};
+
+  auto ifOp = builder.create<scf::IfOp>(loc, types, inBound, true);
+  // if (inbound)
+  //   yield coord < crdHi
+  builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
+  Value crd = genIndexLoad(builder, loc, crdBuf, posit);
+  YIELD(CMPI(ult, crd, crdHi));
+  // else
+  //   yield false
+  builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
+  YIELD(constantI1(builder, loc, false));
+
+  builder.setInsertionPointAfter(ifOp);
+  return ifOp.getResult(0);
+}
+
 // Generates a while loop to iterate over a slice sparse level as follows.
 //
-// while(loopLo < loopHi) {
-//   if (coords[loopLo] < offset + size) {
-//     body_builder
-//   } else {
-//    break;
-//   }
+// while(coords[loopLo] < offset + size) {
+//   body_builder
 //   loopLo ++;
 // }
 std::pair<Operation *, ValueRange> LoopEmitter::genSliceLvlTraverseLoop(
-    OpBuilder &builder, Location loc, Value loopLo, Value loopHi, Value offset,
+    OpBuilder &builder, Location loc, Value posLo, Value posHi, Value offset,
     Value size, TensorId tid, Level lvl, ValueRange userReduc, bool genYield,
     LoopBodyBuilder bodyBuilder) {
   Value c1 = C_IDX(1);
   Value sliceHi = ADDI(offset, sliceSizes[tid][lvl].back());
 
   SmallVector<Value> reduc = {
-      loopLo,                         // loop lower bounds
+      posLo,                          // loop lower bounds
       constantI1(builder, loc, true), // continue
   };
   // Append user required reduction value.
@@ -1465,23 +1482,24 @@ std::pair<Operation *, ValueRange> LoopEmitter::genSliceLvlTraverseLoop(
   scf::WhileOp whileOp = builder.create<scf::WhileOp>(
       loc, ValueRange(reduc).getTypes(), reduc,
       /*beforeBuilder=*/
-      [loopHi](OpBuilder &builder, Location loc, ValueRange args) {
-        Value lo = args[0];
-        Value cont = args[1];
-        Value inBound = CMPI(ult, lo, loopHi);
-        Value cond = ANDI(cont, inBound);
+      [this, posHi, sliceHi, tid, lvl](OpBuilder &builder, Location loc,
+                                       ValueRange args) {
+        Value cond = genSparseReducedAffineCond(
+            builder, loc, coordinatesBuffers[tid][lvl], sliceHi, args[0], posHi,
+            args[1]);
         // continue if not yet break nor out of bound.
         builder.create<scf::ConditionOp>(loc, cond, args);
       },
       /*afterBuilder=*/
-      [this, c1, tid, lvl, sliceHi, genYield,
-       bodyBuilder](OpBuilder &builder, Location loc, ValueRange args) {
+      [c1, genYield, bodyBuilder](OpBuilder &builder, Location loc,
+                                  ValueRange args) {
         Value iv = args[0];
-        Value coord =
-            genIndexLoad(builder, loc, coordinatesBuffers[tid][lvl], iv);
-        Value cont = CMPI(ult, coord, sliceHi);
         TypeRange types = args.drop_front(2).getTypes();
-
+        // The coordinate must be in bounds, as guaranteed by the loop condition.
+        // We generate a fake if operation here only to hide the two extra loop
+        // induction variables we maintain from the user, and rely on a later
+        // optimization pass to remove it.
+        Value cont = constantI1(builder, loc, true);
         auto ifOp = builder.create<scf::IfOp>(loc, types, cont,
                                               /*withElseBlock=*/!types.empty());
         {

diff  --git a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
index 620746ec95d04..d1620125a43ed 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir
@@ -1,271 +1,264 @@
-// RUN: mlir-opt %s --sparsification="enable-index-reduction=true" --cse | FileCheck %s
+// RUN: mlir-opt %s --sparsification="enable-index-reduction=true" --canonicalize --cse | FileCheck %s
 
 #map = affine_map<(d0, d1, d2, d3) -> (d0 + d2, d1 + d3)>
 #map1 = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
 #map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
 
 #DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>
-
 // CHECK-LABEL:   func.func @conv2d_all_sparse_CSR(
-// CHECK-SAME:      %[[VAL_0:.*]]: tensor<8x8xi32, #{{.*}}>,
-// CHECK-SAME:      %[[VAL_1:.*]]: tensor<3x3xi32>) -> tensor<6x6xi32, #{{.*}}> {
-// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 8 : index
-// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 3 : index
-// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
-// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 2 : index
-// CHECK-DAG:       %[[VAL_7:.*]] = arith.constant 4 : index
-// CHECK-DAG:       %[[VAL_8:.*]] = arith.constant 0 : i32
-// CHECK-DAG:       %[[VAL_9:.*]] = arith.constant true
-// CHECK-DAG:       %[[VAL_10:.*]] = arith.constant false
-// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.alloc_tensor() : tensor<6x6xi32, #{{.*}}>
-// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #{{.*}}> to memref<?xindex>
-// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #{{.*}}> to memref<?xindex>
-// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #{{.*}}> to memref<?xindex>
-// CHECK-DAG:       %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #{{.*}}> to memref<?xindex>
-// CHECK-DAG:       %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xi32, #{{.*}}> to memref<?xi32>
-// CHECK-DAG:       %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_1]] : memref<3x3xi32>
-// CHECK-DAG:       %[[VAL_18:.*]] = memref.alloca(%[[VAL_2]]) : memref<?xindex>
-// CHECK-DAG:       %[[VAL_19:.*]] = memref.alloca(%[[VAL_7]]) : memref<?xindex>
-// CHECK:           %[[VAL_20:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:           memref.store %[[VAL_7]], %[[VAL_19]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK:           memref.store %[[VAL_4]], %[[VAL_19]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:           memref.store %[[VAL_4]], %[[VAL_19]]{{\[}}%[[VAL_6]]] : memref<?xindex>
-// CHECK:           memref.store %[[VAL_20]], %[[VAL_19]]{{\[}}%[[VAL_3]]] : memref<?xindex>
-// CHECK:           %[[VAL_21:.*]] = arith.cmpi ugt, %[[VAL_20]], %[[VAL_4]] : index
-// CHECK:           %[[VAL_22:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK:           %[[VAL_23:.*]] = arith.cmpi uge, %[[VAL_22]], %[[VAL_3]] : index
+// CHECK-SAME:      %[[VAL_0:.*]]: tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>>,
+// CHECK-SAME:      %[[VAL_1:.*]]: tensor<3x3xi32>)
+// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant true
+// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant -2 : index
+// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 8 : index
+// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 3 : index
+// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 4 : index
+// CHECK-DAG:       %[[VAL_7:.*]] = arith.constant 2 : index
+// CHECK-DAG:       %[[VAL_8:.*]] = arith.constant 0 : index
+// CHECK-DAG:       %[[VAL_9:.*]] = arith.constant 1 : index
+// CHECK-DAG:       %[[VAL_10:.*]] = arith.constant 0 : i32
+// CHECK-DAG:       %[[VAL_11:.*]] = arith.constant false
+// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.alloc_tensor() : tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
+// CHECK-DAG:       %[[VAL_17:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>> to memref<?xi32>
+// CHECK-DAG:       %[[VAL_18:.*]] = memref.alloca() : memref<8xindex>
+// CHECK-DAG:       %[[VAL_19:.*]] = memref.alloca() : memref<4xindex>
+// CHECK-DAG:       %[[VAL_20:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_9]]] : memref<?xindex>
+// CHECK:           memref.store %[[VAL_6]], %[[VAL_19]]{{\[}}%[[VAL_8]]] : memref<4xindex>
+// CHECK:           memref.store %[[VAL_8]], %[[VAL_19]]{{\[}}%[[VAL_9]]] : memref<4xindex>
+// CHECK:           memref.store %[[VAL_8]], %[[VAL_19]]{{\[}}%[[VAL_7]]] : memref<4xindex>
+// CHECK:           memref.store %[[VAL_20]], %[[VAL_19]]{{\[}}%[[VAL_5]]] : memref<4xindex>
+// CHECK:           %[[VAL_21:.*]] = arith.cmpi ugt, %[[VAL_20]], %[[VAL_8]] : index
+// CHECK:           %[[VAL_22:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_8]]] : memref<?xindex>
+// CHECK:           %[[VAL_23:.*]] = arith.cmpi uge, %[[VAL_22]], %[[VAL_5]] : index
 // CHECK:           %[[VAL_24:.*]] = arith.andi %[[VAL_21]], %[[VAL_23]] : i1
-// CHECK:           %[[VAL_25:.*]] = arith.addi %[[VAL_22]], %[[VAL_5]] : index
-// CHECK:           %[[VAL_26:.*]] = arith.subi %[[VAL_25]], %[[VAL_3]] : index
-// CHECK:           %[[VAL_27:.*]] = arith.select %[[VAL_24]], %[[VAL_26]], %[[VAL_4]] : index
-// CHECK:           %[[VAL_28:.*]]:4 = scf.while (%[[VAL_29:.*]] = %[[VAL_21]], %[[VAL_30:.*]] = %[[VAL_22]], %[[VAL_31:.*]] = %[[VAL_27]], %[[VAL_32:.*]] = %[[VAL_11]]) : (i1, index, index, tensor<6x6xi32, #{{.*}}>) -> (i1, index, index, tensor<6x6xi32, #{{.*}}>) {
-// CHECK:             scf.condition(%[[VAL_29]]) %[[VAL_29]], %[[VAL_30]], %[[VAL_31]], %[[VAL_32]] : i1, index, index, tensor<6x6xi32, #{{.*}}>
+// CHECK:           %[[VAL_25:.*]] = arith.addi %[[VAL_22]], %[[VAL_3]] : index
+// CHECK:           %[[VAL_26:.*]] = arith.select %[[VAL_24]], %[[VAL_25]], %[[VAL_8]] : index
+// CHECK:           %[[VAL_27:.*]]:3 = scf.while (%[[VAL_28:.*]] = %[[VAL_21]], %[[VAL_29:.*]] = %[[VAL_22]], %[[VAL_30:.*]] = %[[VAL_26]], %[[VAL_31:.*]] = %[[VAL_12]]) : (i1, index, index, tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>) -> (index, index, tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>) {
+// CHECK:             scf.condition(%[[VAL_28]]) %[[VAL_29]], %[[VAL_30]], %[[VAL_31]] : index, index, tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:           } do {
-// CHECK:           ^bb0(%[[VAL_33:.*]]: i1, %[[VAL_34:.*]]: index, %[[VAL_35:.*]]: index, %[[VAL_36:.*]]: tensor<6x6xi32, #{{.*}}>):
-// CHECK:             %[[VAL_37:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK:             %[[VAL_38:.*]]:3 = scf.for %[[VAL_39:.*]] = %[[VAL_6]] to %[[VAL_37]] step %[[VAL_6]] iter_args(%[[VAL_40:.*]] = %[[VAL_10]], %[[VAL_41:.*]] = %[[VAL_2]], %[[VAL_42:.*]] = %[[VAL_6]]) -> (i1, index, index) {
-// CHECK:               %[[VAL_43:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_39]]] : memref<?xindex>
-// CHECK:               %[[VAL_44:.*]] = arith.addi %[[VAL_39]], %[[VAL_5]] : index
-// CHECK:               %[[VAL_45:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_44]]] : memref<?xindex>
-// CHECK:               %[[VAL_46:.*]] = arith.addi %[[VAL_35]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_47:.*]]:5 = scf.while (%[[VAL_48:.*]] = %[[VAL_43]], %[[VAL_49:.*]] = %[[VAL_9]], %[[VAL_50:.*]] = %[[VAL_40]], %[[VAL_51:.*]] = %[[VAL_41]], %[[VAL_52:.*]] = %[[VAL_42]]) : (index, i1, i1, index, index) -> (index, i1, i1, index, index) {
-// CHECK:                 %[[VAL_53:.*]] = arith.cmpi ult, %[[VAL_48]], %[[VAL_45]] : index
-// CHECK:                 %[[VAL_54:.*]] = arith.andi %[[VAL_49]], %[[VAL_53]] : i1
-// CHECK:                 scf.condition(%[[VAL_54]]) %[[VAL_48]], %[[VAL_49]], %[[VAL_50]], %[[VAL_51]], %[[VAL_52]] : index, i1, i1, index, index
+// CHECK:           ^bb0(%[[VAL_32:.*]]: index, %[[VAL_33:.*]]: index, %[[VAL_34:.*]]: tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>):
+// CHECK:             %[[VAL_35:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_8]]] : memref<4xindex>
+// CHECK:             %[[VAL_36:.*]]:3 = scf.for %[[VAL_37:.*]] = %[[VAL_7]] to %[[VAL_35]] step %[[VAL_7]] iter_args(%[[VAL_38:.*]] = %[[VAL_11]], %[[VAL_39:.*]] = %[[VAL_4]], %[[VAL_40:.*]] = %[[VAL_7]]) -> (i1, index, index) {
+// CHECK:               %[[VAL_41:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_37]]] : memref<4xindex>
+// CHECK:               %[[VAL_42:.*]] = arith.addi %[[VAL_37]], %[[VAL_9]] : index
+// CHECK:               %[[VAL_43:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_42]]] : memref<4xindex>
+// CHECK:               %[[VAL_44:.*]] = arith.addi %[[VAL_33]], %[[VAL_5]] : index
+// CHECK:               %[[VAL_45:.*]]:4 = scf.while (%[[VAL_46:.*]] = %[[VAL_41]], %[[VAL_47:.*]] = %[[VAL_38]], %[[VAL_48:.*]] = %[[VAL_39]], %[[VAL_49:.*]] = %[[VAL_40]]) : (index, i1, index, index) -> (index, i1, index, index) {
+// CHECK:                 %[[VAL_50:.*]] = arith.cmpi ult, %[[VAL_46]], %[[VAL_43]] : index
+// CHECK:                 %[[VAL_51:.*]] = scf.if %[[VAL_50]] -> (i1) {
+// CHECK:                   %[[VAL_52:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_46]]] : memref<?xindex>
+// CHECK:                   %[[VAL_53:.*]] = arith.cmpi ult, %[[VAL_52]], %[[VAL_44]] : index
+// CHECK:                   scf.yield %[[VAL_53]] : i1
+// CHECK:                 } else {
+// CHECK:                   scf.yield %[[VAL_11]] : i1
+// CHECK:                 }
+// CHECK:                 scf.condition(%[[VAL_54:.*]]) %[[VAL_46]], %[[VAL_47]], %[[VAL_48]], %[[VAL_49]] : index, i1, index, index
 // CHECK:               } do {
-// CHECK:               ^bb0(%[[VAL_55:.*]]: index, %[[VAL_56:.*]]: i1, %[[VAL_57:.*]]: i1, %[[VAL_58:.*]]: index, %[[VAL_59:.*]]: index):
-// CHECK:                 %[[VAL_60:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_55]]] : memref<?xindex>
-// CHECK:                 %[[VAL_61:.*]] = arith.cmpi ult, %[[VAL_60]], %[[VAL_46]] : index
-// CHECK:                 %[[VAL_62:.*]]:3 = scf.if %[[VAL_61]] -> (i1, index, index) {
-// CHECK:                   %[[VAL_63:.*]] = arith.addi %[[VAL_55]], %[[VAL_5]] : index
-// CHECK:                   %[[VAL_64:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_55]]] : memref<?xindex>
-// CHECK:                   %[[VAL_65:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_63]]] : memref<?xindex>
-// CHECK:                   %[[VAL_66:.*]] = arith.cmpi ult, %[[VAL_64]], %[[VAL_65]] : index
-// CHECK:                   %[[VAL_67:.*]] = arith.ori %[[VAL_66]], %[[VAL_57]] : i1
-// CHECK:                   %[[VAL_68:.*]] = scf.if %[[VAL_66]] -> (index) {
-// CHECK:                     %[[VAL_69:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_64]]] : memref<?xindex>
-// CHECK:                     %[[VAL_70:.*]] = arith.cmpi ult, %[[VAL_69]], %[[VAL_58]] : index
-// CHECK:                     %[[VAL_71:.*]] = arith.select %[[VAL_70]], %[[VAL_69]], %[[VAL_58]] : index
-// CHECK:                     scf.yield %[[VAL_71]] : index
-// CHECK:                   } else {
-// CHECK:                     scf.yield %[[VAL_58]] : index
-// CHECK:                   }
-// CHECK:                   memref.store %[[VAL_64]], %[[VAL_18]]{{\[}}%[[VAL_59]]] : memref<?xindex>
-// CHECK:                   %[[VAL_72:.*]] = arith.addi %[[VAL_59]], %[[VAL_5]] : index
-// CHECK:                   memref.store %[[VAL_65]], %[[VAL_18]]{{\[}}%[[VAL_72]]] : memref<?xindex>
-// CHECK:                   %[[VAL_73:.*]] = arith.addi %[[VAL_59]], %[[VAL_6]] : index
-// CHECK:                   scf.yield %[[VAL_67]], %[[VAL_74:.*]], %[[VAL_73]] : i1, index, index
+// CHECK:               ^bb0(%[[VAL_55:.*]]: index, %[[VAL_56:.*]]: i1, %[[VAL_57:.*]]: index, %[[VAL_58:.*]]: index):
+// CHECK:                 %[[VAL_59:.*]] = arith.addi %[[VAL_55]], %[[VAL_9]] : index
+// CHECK:                 %[[VAL_60:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_55]]] : memref<?xindex>
+// CHECK:                 %[[VAL_61:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_59]]] : memref<?xindex>
+// CHECK:                 %[[VAL_62:.*]] = arith.cmpi ult, %[[VAL_60]], %[[VAL_61]] : index
+// CHECK:                 %[[VAL_63:.*]] = arith.ori %[[VAL_62]], %[[VAL_56]] : i1
+// CHECK:                 %[[VAL_64:.*]] = scf.if %[[VAL_62]] -> (index) {
+// CHECK:                   %[[VAL_65:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_60]]] : memref<?xindex>
+// CHECK:                   %[[VAL_66:.*]] = arith.cmpi ult, %[[VAL_65]], %[[VAL_57]] : index
+// CHECK:                   %[[VAL_67:.*]] = arith.select %[[VAL_66]], %[[VAL_65]], %[[VAL_57]] : index
+// CHECK:                   scf.yield %[[VAL_67]] : index
 // CHECK:                 } else {
-// CHECK:                   scf.yield %[[VAL_57]], %[[VAL_58]], %[[VAL_59]] : i1, index, index
-// CHECK:                 } {"Emitted from" = "slice"}
-// CHECK:                 %[[VAL_75:.*]] = arith.addi %[[VAL_55]], %[[VAL_5]] : index
-// CHECK:                 scf.yield %[[VAL_75]], %[[VAL_61]], %[[VAL_76:.*]]#0, %[[VAL_76]]#1, %[[VAL_76]]#2 : index, i1, i1, index, index
+// CHECK:                   scf.yield %[[VAL_57]] : index
+// CHECK:                 }
+// CHECK:                 memref.store %[[VAL_60]], %[[VAL_18]]{{\[}}%[[VAL_58]]] : memref<8xindex>
+// CHECK:                 %[[VAL_68:.*]] = arith.addi %[[VAL_58]], %[[VAL_9]] : index
+// CHECK:                 memref.store %[[VAL_61]], %[[VAL_18]]{{\[}}%[[VAL_68]]] : memref<8xindex>
+// CHECK:                 %[[VAL_69:.*]] = arith.addi %[[VAL_58]], %[[VAL_7]] : index
+// CHECK:                 scf.yield %[[VAL_59]], %[[VAL_63]], %[[VAL_70:.*]], %[[VAL_69]] : index, i1, index, index
 // CHECK:               }
-// CHECK:               scf.yield %[[VAL_77:.*]]#2, %[[VAL_77]]#3, %[[VAL_77]]#4 : i1, index, index
+// CHECK:               scf.yield %[[VAL_71:.*]]#1, %[[VAL_71]]#2, %[[VAL_71]]#3 : i1, index, index
 // CHECK:             }
-// CHECK:             memref.store %[[VAL_78:.*]]#2, %[[VAL_18]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK:             memref.store %[[VAL_4]], %[[VAL_18]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:             %[[VAL_79:.*]] = arith.cmpi uge, %[[VAL_78]]#1, %[[VAL_3]] : index
-// CHECK:             %[[VAL_80:.*]] = arith.andi %[[VAL_78]]#0, %[[VAL_79]] : i1
-// CHECK:             %[[VAL_81:.*]] = arith.addi %[[VAL_78]]#1, %[[VAL_5]] : index
-// CHECK:             %[[VAL_82:.*]] = arith.subi %[[VAL_81]], %[[VAL_3]] : index
-// CHECK:             %[[VAL_83:.*]] = arith.select %[[VAL_80]], %[[VAL_82]], %[[VAL_4]] : index
-// CHECK:             %[[VAL_84:.*]]:4 = scf.while (%[[VAL_85:.*]] = %[[VAL_78]]#0, %[[VAL_86:.*]] = %[[VAL_78]]#1, %[[VAL_87:.*]] = %[[VAL_83]], %[[VAL_88:.*]] = %[[VAL_36]]) : (i1, index, index, tensor<6x6xi32, #{{.*}}>) -> (i1, index, index, tensor<6x6xi32, #{{.*}}>) {
-// CHECK:               scf.condition(%[[VAL_85]]) %[[VAL_85]], %[[VAL_86]], %[[VAL_87]], %[[VAL_88]] : i1, index, index, tensor<6x6xi32, #{{.*}}>
+// CHECK:             memref.store %[[VAL_72:.*]]#2, %[[VAL_18]]{{\[}}%[[VAL_8]]] : memref<8xindex>
+// CHECK:             memref.store %[[VAL_8]], %[[VAL_18]]{{\[}}%[[VAL_9]]] : memref<8xindex>
+// CHECK:             %[[VAL_73:.*]] = arith.cmpi uge, %[[VAL_72]]#1, %[[VAL_5]] : index
+// CHECK:             %[[VAL_74:.*]] = arith.andi %[[VAL_72]]#0, %[[VAL_73]] : i1
+// CHECK:             %[[VAL_75:.*]] = arith.addi %[[VAL_72]]#1, %[[VAL_3]] : index
+// CHECK:             %[[VAL_76:.*]] = arith.select %[[VAL_74]], %[[VAL_75]], %[[VAL_8]] : index
+// CHECK:             %[[VAL_77:.*]]:3 = scf.while (%[[VAL_78:.*]] = %[[VAL_72]]#0, %[[VAL_79:.*]] = %[[VAL_72]]#1, %[[VAL_80:.*]] = %[[VAL_76]], %[[VAL_81:.*]] = %[[VAL_34]]) : (i1, index, index, tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>) -> (index, index, tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>) {
+// CHECK:               scf.condition(%[[VAL_78]]) %[[VAL_79]], %[[VAL_80]], %[[VAL_81]] : index, index, tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:             } do {
-// CHECK:             ^bb0(%[[VAL_89:.*]]: i1, %[[VAL_90:.*]]: index, %[[VAL_91:.*]]: index, %[[VAL_92:.*]]: tensor<6x6xi32, #{{.*}}>):
-// CHECK:               %[[VAL_93:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:               %[[VAL_94:.*]] = arith.addi %[[VAL_93]], %[[VAL_6]] : index
-// CHECK:               %[[VAL_95:.*]] = arith.addi %[[VAL_94]], %[[VAL_5]] : index
-// CHECK:               %[[VAL_96:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_94]]] : memref<?xindex>
-// CHECK:               %[[VAL_97:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_95]]] : memref<?xindex>
-// CHECK:               %[[VAL_98:.*]] = arith.addi %[[VAL_35]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_99:.*]]:5 = scf.while (%[[VAL_100:.*]] = %[[VAL_96]], %[[VAL_101:.*]] = %[[VAL_9]], %[[VAL_102:.*]] = %[[VAL_8]], %[[VAL_103:.*]] = %[[VAL_10]], %[[VAL_104:.*]] = %[[VAL_92]]) : (index, i1, i32, i1, tensor<6x6xi32, #{{.*}}>) -> (index, i1, i32, i1, tensor<6x6xi32, #{{.*}}>) {
-// CHECK:                 %[[VAL_105:.*]] = arith.cmpi ult, %[[VAL_100]], %[[VAL_97]] : index
-// CHECK:                 %[[VAL_106:.*]] = arith.andi %[[VAL_101]], %[[VAL_105]] : i1
-// CHECK:                 scf.condition(%[[VAL_106]]) %[[VAL_100]], %[[VAL_101]], %[[VAL_102]], %[[VAL_103]], %[[VAL_104]] : index, i1, i32, i1, tensor<6x6xi32, #{{.*}}>
-// CHECK:               } do {
-// CHECK:               ^bb0(%[[VAL_107:.*]]: index, %[[VAL_108:.*]]: i1, %[[VAL_109:.*]]: i32, %[[VAL_110:.*]]: i1, %[[VAL_111:.*]]: tensor<6x6xi32, #{{.*}}>):
-// CHECK:                 %[[VAL_112:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_107]]] : memref<?xindex>
-// CHECK:                 %[[VAL_113:.*]] = arith.cmpi ult, %[[VAL_112]], %[[VAL_98]] : index
-// CHECK:                 %[[VAL_114:.*]]:3 = scf.if %[[VAL_113]] -> (i32, i1, tensor<6x6xi32, #{{.*}}>) {
-// CHECK:                   %[[VAL_115:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_107]]] : memref<?xindex>
-// CHECK:                   %[[VAL_116:.*]] = arith.subi %[[VAL_115]], %[[VAL_35]] : index
-// CHECK:                   %[[VAL_117:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:                   %[[VAL_118:.*]] = arith.addi %[[VAL_117]], %[[VAL_6]] : index
-// CHECK:                   %[[VAL_119:.*]] = arith.addi %[[VAL_118]], %[[VAL_5]] : index
-// CHECK:                   %[[VAL_120:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_118]]] : memref<?xindex>
-// CHECK:                   %[[VAL_121:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_119]]] : memref<?xindex>
-// CHECK:                   %[[VAL_122:.*]] = arith.addi %[[VAL_91]], %[[VAL_3]] : index
-// CHECK:                   %[[VAL_123:.*]]:5 = scf.while (%[[VAL_124:.*]] = %[[VAL_120]], %[[VAL_125:.*]] = %[[VAL_9]], %[[VAL_126:.*]] = %[[VAL_109]], %[[VAL_127:.*]] = %[[VAL_110]], %[[VAL_128:.*]] = %[[VAL_111]]) : (index, i1, i32, i1, tensor<6x6xi32, #{{.*}}>) -> (index, i1, i32, i1, tensor<6x6xi32, #{{.*}}>) {
-// CHECK:                     %[[VAL_129:.*]] = arith.cmpi ult, %[[VAL_124]], %[[VAL_121]] : index
-// CHECK:                     %[[VAL_130:.*]] = arith.andi %[[VAL_125]], %[[VAL_129]] : i1
-// CHECK:                     scf.condition(%[[VAL_130]]) %[[VAL_124]], %[[VAL_125]], %[[VAL_126]], %[[VAL_127]], %[[VAL_128]] : index, i1, i32, i1, tensor<6x6xi32, #{{.*}}>
-// CHECK:                   } do {
-// CHECK:                   ^bb0(%[[VAL_131:.*]]: index, %[[VAL_132:.*]]: i1, %[[VAL_133:.*]]: i32, %[[VAL_134:.*]]: i1, %[[VAL_135:.*]]: tensor<6x6xi32, #{{.*}}>):
-// CHECK:                     %[[VAL_136:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_131]]] : memref<?xindex>
-// CHECK:                     %[[VAL_137:.*]] = arith.cmpi ult, %[[VAL_136]], %[[VAL_122]] : index
-// CHECK:                     %[[VAL_138:.*]]:3 = scf.if %[[VAL_137]] -> (i32, i1, tensor<6x6xi32, #{{.*}}>) {
-// CHECK:                       %[[VAL_139:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_131]]] : memref<?xindex>
-// CHECK:                       %[[VAL_140:.*]] = arith.subi %[[VAL_139]], %[[VAL_91]] : index
-// CHECK:                       %[[VAL_141:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_131]]] : memref<?xi32>
-// CHECK:                       %[[VAL_142:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_116]], %[[VAL_140]]] : memref<3x3xi32>
-// CHECK:                       %[[VAL_143:.*]] = arith.muli %[[VAL_141]], %[[VAL_142]] : i32
-// CHECK:                       %[[VAL_144:.*]] = arith.addi %[[VAL_133]], %[[VAL_143]] : i32
-// CHECK:                       scf.yield %[[VAL_144]], %[[VAL_9]], %[[VAL_135]] : i32, i1, tensor<6x6xi32, #{{.*}}>
-// CHECK:                     } else {
-// CHECK:                       scf.yield %[[VAL_133]], %[[VAL_134]], %[[VAL_135]] : i32, i1, tensor<6x6xi32, #{{.*}}>
-// CHECK:                     } {"Emitted from" = "slice"}
-// CHECK:                     %[[VAL_145:.*]] = arith.addi %[[VAL_131]], %[[VAL_5]] : index
-// CHECK:                     scf.yield %[[VAL_145]], %[[VAL_137]], %[[VAL_146:.*]]#0, %[[VAL_146]]#1, %[[VAL_146]]#2 : index, i1, i32, i1, tensor<6x6xi32, #{{.*}}>
-// CHECK:                   } attributes {"Emitted from" = "linalg.generic"}
-// CHECK:                   %[[VAL_147:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:                   %[[VAL_148:.*]] = arith.addi %[[VAL_147]], %[[VAL_6]] : index
-// CHECK:                   memref.store %[[VAL_148]], %[[VAL_18]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:                   scf.yield %[[VAL_149:.*]]#2, %[[VAL_9]], %[[VAL_149]]#4 : i32, i1, tensor<6x6xi32, #{{.*}}>
+// CHECK:             ^bb0(%[[VAL_82:.*]]: index, %[[VAL_83:.*]]: index, %[[VAL_84:.*]]: tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>):
+// CHECK:               %[[VAL_85:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_9]]] : memref<4xindex>
+// CHECK:               %[[VAL_86:.*]] = arith.addi %[[VAL_85]], %[[VAL_7]] : index
+// CHECK:               %[[VAL_87:.*]] = arith.addi %[[VAL_85]], %[[VAL_5]] : index
+// CHECK:               %[[VAL_88:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_86]]] : memref<4xindex>
+// CHECK:               %[[VAL_89:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_87]]] : memref<4xindex>
+// CHECK:               %[[VAL_90:.*]] = arith.addi %[[VAL_33]], %[[VAL_5]] : index
+// CHECK:               %[[VAL_91:.*]]:3 = scf.while (%[[VAL_92:.*]] = %[[VAL_88]], %[[VAL_93:.*]] = %[[VAL_10]], %[[VAL_94:.*]] = %[[VAL_11]]) : (index, i32, i1) -> (index, i32, i1) {
+// CHECK:                 %[[VAL_95:.*]] = arith.cmpi ult, %[[VAL_92]], %[[VAL_89]] : index
+// CHECK:                 %[[VAL_96:.*]] = scf.if %[[VAL_95]] -> (i1) {
+// CHECK:                   %[[VAL_97:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_92]]] : memref<?xindex>
+// CHECK:                   %[[VAL_98:.*]] = arith.cmpi ult, %[[VAL_97]], %[[VAL_90]] : index
+// CHECK:                   scf.yield %[[VAL_98]] : i1
 // CHECK:                 } else {
-// CHECK:                   scf.yield %[[VAL_109]], %[[VAL_110]], %[[VAL_111]] : i32, i1, tensor<6x6xi32, #{{.*}}>
-// CHECK:                 } {"Emitted from" = "slice"}
-// CHECK:                 %[[VAL_150:.*]] = arith.addi %[[VAL_107]], %[[VAL_5]] : index
-// CHECK:                 scf.yield %[[VAL_150]], %[[VAL_113]], %[[VAL_151:.*]]#0, %[[VAL_151]]#1, %[[VAL_151]]#2 : index, i1, i32, i1, tensor<6x6xi32, #{{.*}}>
-// CHECK:               } attributes {"Emitted from" = "linalg.generic"}
-// CHECK:               %[[VAL_152:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:               %[[VAL_153:.*]] = arith.addi %[[VAL_152]], %[[VAL_6]] : index
-// CHECK:               memref.store %[[VAL_153]], %[[VAL_19]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:               %[[VAL_154:.*]] = scf.if %[[VAL_155:.*]]#3 -> (tensor<6x6xi32, #{{.*}}>) {
-// CHECK:                 %[[VAL_156:.*]] = sparse_tensor.insert %[[VAL_155]]#2 into %[[VAL_155]]#4{{\[}}%[[VAL_35]], %[[VAL_91]]] : tensor<6x6xi32, #{{.*}}>
-// CHECK:                 scf.yield %[[VAL_156]] : tensor<6x6xi32, #{{.*}}>
+// CHECK:                   scf.yield %[[VAL_11]] : i1
+// CHECK:                 }
+// CHECK:                 scf.condition(%[[VAL_99:.*]]) %[[VAL_92]], %[[VAL_93]], %[[VAL_94]] : index, i32, i1
+// CHECK:               } do {
+// CHECK:               ^bb0(%[[VAL_100:.*]]: index, %[[VAL_101:.*]]: i32, %[[VAL_102:.*]]: i1):
+// CHECK:                 %[[VAL_103:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_100]]] : memref<?xindex>
+// CHECK:                 %[[VAL_104:.*]] = arith.subi %[[VAL_103]], %[[VAL_33]] : index
+// CHECK:                 %[[VAL_105:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_9]]] : memref<8xindex>
+// CHECK:                 %[[VAL_106:.*]] = arith.addi %[[VAL_105]], %[[VAL_7]] : index
+// CHECK:                 %[[VAL_107:.*]] = arith.addi %[[VAL_105]], %[[VAL_5]] : index
+// CHECK:                 %[[VAL_108:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_106]]] : memref<8xindex>
+// CHECK:                 %[[VAL_109:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_107]]] : memref<8xindex>
+// CHECK:                 %[[VAL_110:.*]] = arith.addi %[[VAL_83]], %[[VAL_5]] : index
+// CHECK:                 %[[VAL_111:.*]]:2 = scf.while (%[[VAL_112:.*]] = %[[VAL_108]], %[[VAL_113:.*]] = %[[VAL_101]]) : (index, i32) -> (index, i32) {
+// CHECK:                   %[[VAL_114:.*]] = arith.cmpi ult, %[[VAL_112]], %[[VAL_109]] : index
+// CHECK:                   %[[VAL_115:.*]] = scf.if %[[VAL_114]] -> (i1) {
+// CHECK:                     %[[VAL_116:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_112]]] : memref<?xindex>
+// CHECK:                     %[[VAL_117:.*]] = arith.cmpi ult, %[[VAL_116]], %[[VAL_110]] : index
+// CHECK:                     scf.yield %[[VAL_117]] : i1
+// CHECK:                   } else {
+// CHECK:                     scf.yield %[[VAL_11]] : i1
+// CHECK:                   }
+// CHECK:                   scf.condition(%[[VAL_118:.*]]) %[[VAL_112]], %[[VAL_113]] : index, i32
+// CHECK:                 } do {
+// CHECK:                 ^bb0(%[[VAL_119:.*]]: index, %[[VAL_120:.*]]: i32):
+// CHECK:                   %[[VAL_121:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_119]]] : memref<?xindex>
+// CHECK:                   %[[VAL_122:.*]] = arith.subi %[[VAL_121]], %[[VAL_83]] : index
+// CHECK:                   %[[VAL_123:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_119]]] : memref<?xi32>
+// CHECK:                   %[[VAL_124:.*]] = tensor.extract %[[VAL_1]]{{\[}}%[[VAL_104]], %[[VAL_122]]] : tensor<3x3xi32>
+// CHECK:                   %[[VAL_125:.*]] = arith.muli %[[VAL_123]], %[[VAL_124]] : i32
+// CHECK:                   %[[VAL_126:.*]] = arith.addi %[[VAL_120]], %[[VAL_125]] : i32
+// CHECK:                   %[[VAL_127:.*]] = arith.addi %[[VAL_119]], %[[VAL_9]] : index
+// CHECK:                   scf.yield %[[VAL_127]], %[[VAL_126]] : index, i32
+// CHECK:                 }
+// CHECK:                 %[[VAL_128:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_9]]] : memref<8xindex>
+// CHECK:                 %[[VAL_129:.*]] = arith.addi %[[VAL_128]], %[[VAL_7]] : index
+// CHECK:                 memref.store %[[VAL_129]], %[[VAL_18]]{{\[}}%[[VAL_9]]] : memref<8xindex>
+// CHECK:                 %[[VAL_130:.*]] = arith.addi %[[VAL_100]], %[[VAL_9]] : index
+// CHECK:                 scf.yield %[[VAL_130]], %[[VAL_131:.*]]#1, %[[VAL_2]] : index, i32, i1
+// CHECK:               }
+// CHECK:               %[[VAL_132:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_9]]] : memref<4xindex>
+// CHECK:               %[[VAL_133:.*]] = arith.addi %[[VAL_132]], %[[VAL_7]] : index
+// CHECK:               memref.store %[[VAL_133]], %[[VAL_19]]{{\[}}%[[VAL_9]]] : memref<4xindex>
+// CHECK:               %[[VAL_134:.*]] = scf.if %[[VAL_135:.*]]#2 -> (tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>) {
+// CHECK:                 %[[VAL_136:.*]] = sparse_tensor.insert %[[VAL_135]]#1 into %[[VAL_84]]{{\[}}%[[VAL_33]], %[[VAL_83]]] : tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK:                 scf.yield %[[VAL_136]] : tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:               } else {
-// CHECK:                 scf.yield %[[VAL_157:.*]]#4 : tensor<6x6xi32, #{{.*}}>
+// CHECK:                 scf.yield %[[VAL_84]] : tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:               }
-// CHECK:               memref.store %[[VAL_4]], %[[VAL_19]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:               memref.store %[[VAL_4]], %[[VAL_18]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:               %[[VAL_158:.*]] = arith.cmpi ugt, %[[VAL_90]], %[[VAL_91]] : index
-// CHECK:               %[[VAL_159:.*]]:3 = scf.if %[[VAL_158]] -> (index, i1, index) {
-// CHECK:                 %[[VAL_160:.*]] = arith.addi %[[VAL_91]], %[[VAL_5]] : index
-// CHECK:                 scf.yield %[[VAL_90]], %[[VAL_89]], %[[VAL_160]] : index, i1, index
+// CHECK:               memref.store %[[VAL_8]], %[[VAL_19]]{{\[}}%[[VAL_9]]] : memref<4xindex>
+// CHECK:               memref.store %[[VAL_8]], %[[VAL_18]]{{\[}}%[[VAL_9]]] : memref<8xindex>
+// CHECK:               %[[VAL_137:.*]] = arith.cmpi ugt, %[[VAL_82]], %[[VAL_83]] : index
+// CHECK:               %[[VAL_138:.*]]:3 = scf.if %[[VAL_137]] -> (index, i1, index) {
+// CHECK:                 %[[VAL_139:.*]] = arith.addi %[[VAL_83]], %[[VAL_9]] : index
+// CHECK:                 scf.yield %[[VAL_82]], %[[VAL_2]], %[[VAL_139]] : index, i1, index
 // CHECK:               } else {
-// CHECK:                 %[[VAL_161:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK:                 %[[VAL_162:.*]]:2 = scf.for %[[VAL_163:.*]] = %[[VAL_6]] to %[[VAL_161]] step %[[VAL_6]] iter_args(%[[VAL_164:.*]] = %[[VAL_2]], %[[VAL_165:.*]] = %[[VAL_10]]) -> (index, i1) {
-// CHECK:                   %[[VAL_166:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_163]]] : memref<?xindex>
-// CHECK:                   %[[VAL_167:.*]] = arith.addi %[[VAL_163]], %[[VAL_5]] : index
-// CHECK:                   %[[VAL_168:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_167]]] : memref<?xindex>
-// CHECK:                   %[[VAL_169:.*]] = arith.cmpi ult, %[[VAL_166]], %[[VAL_168]] : index
-// CHECK:                   %[[VAL_170:.*]] = scf.if %[[VAL_169]] -> (index) {
-// CHECK:                     %[[VAL_171:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_166]]] : memref<?xindex>
-// CHECK:                     %[[VAL_172:.*]] = arith.cmpi eq, %[[VAL_171]], %[[VAL_90]] : index
-// CHECK:                     %[[VAL_173:.*]] = scf.if %[[VAL_172]] -> (index) {
-// CHECK:                       %[[VAL_174:.*]] = arith.addi %[[VAL_166]], %[[VAL_5]] : index
-// CHECK:                       memref.store %[[VAL_174]], %[[VAL_18]]{{\[}}%[[VAL_163]]] : memref<?xindex>
-// CHECK:                       scf.yield %[[VAL_174]] : index
+// CHECK:                 %[[VAL_140:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_8]]] : memref<8xindex>
+// CHECK:                 %[[VAL_141:.*]]:2 = scf.for %[[VAL_142:.*]] = %[[VAL_7]] to %[[VAL_140]] step %[[VAL_7]] iter_args(%[[VAL_143:.*]] = %[[VAL_4]], %[[VAL_144:.*]] = %[[VAL_11]]) -> (index, i1) {
+// CHECK:                   %[[VAL_145:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_142]]] : memref<8xindex>
+// CHECK:                   %[[VAL_146:.*]] = arith.addi %[[VAL_142]], %[[VAL_9]] : index
+// CHECK:                   %[[VAL_147:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_146]]] : memref<8xindex>
+// CHECK:                   %[[VAL_148:.*]] = arith.cmpi ult, %[[VAL_145]], %[[VAL_147]] : index
+// CHECK:                   %[[VAL_149:.*]] = scf.if %[[VAL_148]] -> (index) {
+// CHECK:                     %[[VAL_150:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_145]]] : memref<?xindex>
+// CHECK:                     %[[VAL_151:.*]] = arith.cmpi eq, %[[VAL_150]], %[[VAL_82]] : index
+// CHECK:                     %[[VAL_152:.*]] = scf.if %[[VAL_151]] -> (index) {
+// CHECK:                       %[[VAL_153:.*]] = arith.addi %[[VAL_145]], %[[VAL_9]] : index
+// CHECK:                       memref.store %[[VAL_153]], %[[VAL_18]]{{\[}}%[[VAL_142]]] : memref<8xindex>
+// CHECK:                       scf.yield %[[VAL_153]] : index
 // CHECK:                     } else {
-// CHECK:                       scf.yield %[[VAL_166]] : index
+// CHECK:                       scf.yield %[[VAL_145]] : index
 // CHECK:                     }
-// CHECK:                     scf.yield %[[VAL_175:.*]] : index
+// CHECK:                     scf.yield %[[VAL_154:.*]] : index
 // CHECK:                   } else {
-// CHECK:                     scf.yield %[[VAL_166]] : index
+// CHECK:                     scf.yield %[[VAL_145]] : index
 // CHECK:                   }
-// CHECK:                   %[[VAL_176:.*]] = arith.cmpi ult, %[[VAL_177:.*]], %[[VAL_168]] : index
-// CHECK:                   %[[VAL_178:.*]] = scf.if %[[VAL_176]] -> (index) {
-// CHECK:                     %[[VAL_179:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_177]]] : memref<?xindex>
-// CHECK:                     scf.yield %[[VAL_179]] : index
+// CHECK:                   %[[VAL_155:.*]] = arith.cmpi ult, %[[VAL_156:.*]], %[[VAL_147]] : index
+// CHECK:                   %[[VAL_157:.*]] = scf.if %[[VAL_155]] -> (index) {
+// CHECK:                     %[[VAL_158:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_156]]] : memref<?xindex>
+// CHECK:                     scf.yield %[[VAL_158]] : index
 // CHECK:                   } else {
-// CHECK:                     scf.yield %[[VAL_164]] : index
+// CHECK:                     scf.yield %[[VAL_143]] : index
 // CHECK:                   }
-// CHECK:                   %[[VAL_180:.*]] = arith.ori %[[VAL_176]], %[[VAL_165]] : i1
-// CHECK:                   %[[VAL_181:.*]] = arith.cmpi ult, %[[VAL_182:.*]], %[[VAL_164]] : index
-// CHECK:                   %[[VAL_183:.*]] = arith.select %[[VAL_181]], %[[VAL_182]], %[[VAL_164]] : index
-// CHECK:                   scf.yield %[[VAL_183]], %[[VAL_180]] : index, i1
+// CHECK:                   %[[VAL_159:.*]] = arith.ori %[[VAL_155]], %[[VAL_144]] : i1
+// CHECK:                   %[[VAL_160:.*]] = arith.cmpi ult, %[[VAL_161:.*]], %[[VAL_143]] : index
+// CHECK:                   %[[VAL_162:.*]] = arith.select %[[VAL_160]], %[[VAL_161]], %[[VAL_143]] : index
+// CHECK:                   scf.yield %[[VAL_162]], %[[VAL_159]] : index, i1
 // CHECK:                 }
-// CHECK:                 %[[VAL_184:.*]] = arith.addi %[[VAL_185:.*]]#0, %[[VAL_5]] : index
-// CHECK:                 %[[VAL_186:.*]] = arith.subi %[[VAL_184]], %[[VAL_3]] : index
-// CHECK:                 %[[VAL_187:.*]] = arith.cmpi uge, %[[VAL_184]], %[[VAL_3]] : index
-// CHECK:                 %[[VAL_188:.*]] = arith.select %[[VAL_187]], %[[VAL_186]], %[[VAL_4]] : index
-// CHECK:                 scf.yield %[[VAL_185]]#0, %[[VAL_185]]#1, %[[VAL_188]] : index, i1, index
+// CHECK:                 %[[VAL_163:.*]] = arith.addi %[[VAL_164:.*]]#0, %[[VAL_9]] : index
+// CHECK:                 %[[VAL_165:.*]] = arith.addi %[[VAL_164]]#0, %[[VAL_3]] : index
+// CHECK:                 %[[VAL_166:.*]] = arith.cmpi uge, %[[VAL_163]], %[[VAL_5]] : index
+// CHECK:                 %[[VAL_167:.*]] = arith.select %[[VAL_166]], %[[VAL_165]], %[[VAL_8]] : index
+// CHECK:                 scf.yield %[[VAL_164]]#0, %[[VAL_164]]#1, %[[VAL_167]] : index, i1, index
 // CHECK:               }
-// CHECK:               %[[VAL_189:.*]] = arith.addi %[[VAL_91]], %[[VAL_5]] : index
-// CHECK:               %[[VAL_190:.*]] = arith.cmpi ugt, %[[VAL_191:.*]]#2, %[[VAL_189]] : index
-// CHECK:               %[[VAL_192:.*]] = arith.select %[[VAL_190]], %[[VAL_191]]#2, %[[VAL_189]] : index
-// CHECK:               %[[VAL_193:.*]] = arith.addi %[[VAL_192]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_194:.*]] = arith.cmpi ule, %[[VAL_193]], %[[VAL_2]] : index
-// CHECK:               %[[VAL_195:.*]] = arith.andi %[[VAL_191]]#1, %[[VAL_194]] : i1
-// CHECK:               scf.yield %[[VAL_195]], %[[VAL_191]]#0, %[[VAL_192]], %[[VAL_196:.*]] : i1, index, index, tensor<6x6xi32, #{{.*}}>
-// CHECK:             } attributes {"Emitted from" = "linalg.generic"}
-// CHECK:             memref.store %[[VAL_4]], %[[VAL_19]]{{\[}}%[[VAL_5]]] : memref<?xindex>
-// CHECK:             %[[VAL_197:.*]] = arith.cmpi ugt, %[[VAL_34]], %[[VAL_35]] : index
-// CHECK:             %[[VAL_198:.*]]:3 = scf.if %[[VAL_197]] -> (index, i1, index) {
-// CHECK:               %[[VAL_199:.*]] = arith.addi %[[VAL_35]], %[[VAL_5]] : index
-// CHECK:               scf.yield %[[VAL_34]], %[[VAL_33]], %[[VAL_199]] : index, i1, index
+// CHECK:               %[[VAL_168:.*]] = arith.addi %[[VAL_83]], %[[VAL_9]] : index
+// CHECK:               %[[VAL_169:.*]] = arith.cmpi ugt, %[[VAL_170:.*]]#2, %[[VAL_168]] : index
+// CHECK:               %[[VAL_171:.*]] = arith.select %[[VAL_169]], %[[VAL_170]]#2, %[[VAL_168]] : index
+// CHECK:               %[[VAL_172:.*]] = arith.addi %[[VAL_171]], %[[VAL_5]] : index
+// CHECK:               %[[VAL_173:.*]] = arith.cmpi ule, %[[VAL_172]], %[[VAL_4]] : index
+// CHECK:               %[[VAL_174:.*]] = arith.andi %[[VAL_170]]#1, %[[VAL_173]] : i1
+// CHECK:               scf.yield %[[VAL_174]], %[[VAL_170]]#0, %[[VAL_171]], %[[VAL_175:.*]] : i1, index, index, tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK:             }
+// CHECK:             memref.store %[[VAL_8]], %[[VAL_19]]{{\[}}%[[VAL_9]]] : memref<4xindex>
+// CHECK:             %[[VAL_176:.*]] = arith.cmpi ugt, %[[VAL_32]], %[[VAL_33]] : index
+// CHECK:             %[[VAL_177:.*]]:3 = scf.if %[[VAL_176]] -> (index, i1, index) {
+// CHECK:               %[[VAL_178:.*]] = arith.addi %[[VAL_33]], %[[VAL_9]] : index
+// CHECK:               scf.yield %[[VAL_32]], %[[VAL_2]], %[[VAL_178]] : index, i1, index
 // CHECK:             } else {
-// CHECK:               %[[VAL_200:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_4]]] : memref<?xindex>
-// CHECK:               %[[VAL_201:.*]]:2 = scf.for %[[VAL_202:.*]] = %[[VAL_6]] to %[[VAL_200]] step %[[VAL_6]] iter_args(%[[VAL_203:.*]] = %[[VAL_2]], %[[VAL_204:.*]] = %[[VAL_10]]) -> (index, i1) {
-// CHECK:                 %[[VAL_205:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_202]]] : memref<?xindex>
-// CHECK:                 %[[VAL_206:.*]] = arith.addi %[[VAL_202]], %[[VAL_5]] : index
-// CHECK:                 %[[VAL_207:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_206]]] : memref<?xindex>
-// CHECK:                 %[[VAL_208:.*]] = arith.cmpi ult, %[[VAL_205]], %[[VAL_207]] : index
-// CHECK:                 %[[VAL_209:.*]] = scf.if %[[VAL_208]] -> (index) {
-// CHECK:                   %[[VAL_210:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_205]]] : memref<?xindex>
-// CHECK:                   %[[VAL_211:.*]] = arith.cmpi eq, %[[VAL_210]], %[[VAL_34]] : index
-// CHECK:                   %[[VAL_212:.*]] = scf.if %[[VAL_211]] -> (index) {
-// CHECK:                     %[[VAL_213:.*]] = arith.addi %[[VAL_205]], %[[VAL_5]] : index
-// CHECK:                     memref.store %[[VAL_213]], %[[VAL_19]]{{\[}}%[[VAL_202]]] : memref<?xindex>
-// CHECK:                     scf.yield %[[VAL_213]] : index
+// CHECK:               %[[VAL_179:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_8]]] : memref<4xindex>
+// CHECK:               %[[VAL_180:.*]]:2 = scf.for %[[VAL_181:.*]] = %[[VAL_7]] to %[[VAL_179]] step %[[VAL_7]] iter_args(%[[VAL_182:.*]] = %[[VAL_4]], %[[VAL_183:.*]] = %[[VAL_11]]) -> (index, i1) {
+// CHECK:                 %[[VAL_184:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_181]]] : memref<4xindex>
+// CHECK:                 %[[VAL_185:.*]] = arith.addi %[[VAL_181]], %[[VAL_9]] : index
+// CHECK:                 %[[VAL_186:.*]] = memref.load %[[VAL_19]]{{\[}}%[[VAL_185]]] : memref<4xindex>
+// CHECK:                 %[[VAL_187:.*]] = arith.cmpi ult, %[[VAL_184]], %[[VAL_186]] : index
+// CHECK:                 %[[VAL_188:.*]] = scf.if %[[VAL_187]] -> (index) {
+// CHECK:                   %[[VAL_189:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_184]]] : memref<?xindex>
+// CHECK:                   %[[VAL_190:.*]] = arith.cmpi eq, %[[VAL_189]], %[[VAL_32]] : index
+// CHECK:                   %[[VAL_191:.*]] = scf.if %[[VAL_190]] -> (index) {
+// CHECK:                     %[[VAL_192:.*]] = arith.addi %[[VAL_184]], %[[VAL_9]] : index
+// CHECK:                     memref.store %[[VAL_192]], %[[VAL_19]]{{\[}}%[[VAL_181]]] : memref<4xindex>
+// CHECK:                     scf.yield %[[VAL_192]] : index
 // CHECK:                   } else {
-// CHECK:                     scf.yield %[[VAL_205]] : index
+// CHECK:                     scf.yield %[[VAL_184]] : index
 // CHECK:                   }
-// CHECK:                   scf.yield %[[VAL_214:.*]] : index
+// CHECK:                   scf.yield %[[VAL_193:.*]] : index
 // CHECK:                 } else {
-// CHECK:                   scf.yield %[[VAL_205]] : index
+// CHECK:                   scf.yield %[[VAL_184]] : index
 // CHECK:                 }
-// CHECK:                 %[[VAL_215:.*]] = arith.cmpi ult, %[[VAL_216:.*]], %[[VAL_207]] : index
-// CHECK:                 %[[VAL_217:.*]] = scf.if %[[VAL_215]] -> (index) {
-// CHECK:                   %[[VAL_218:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_216]]] : memref<?xindex>
-// CHECK:                   scf.yield %[[VAL_218]] : index
+// CHECK:                 %[[VAL_194:.*]] = arith.cmpi ult, %[[VAL_195:.*]], %[[VAL_186]] : index
+// CHECK:                 %[[VAL_196:.*]] = scf.if %[[VAL_194]] -> (index) {
+// CHECK:                   %[[VAL_197:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_195]]] : memref<?xindex>
+// CHECK:                   scf.yield %[[VAL_197]] : index
 // CHECK:                 } else {
-// CHECK:                   scf.yield %[[VAL_203]] : index
+// CHECK:                   scf.yield %[[VAL_182]] : index
 // CHECK:                 }
-// CHECK:                 %[[VAL_219:.*]] = arith.ori %[[VAL_215]], %[[VAL_204]] : i1
-// CHECK:                 %[[VAL_220:.*]] = arith.cmpi ult, %[[VAL_221:.*]], %[[VAL_203]] : index
-// CHECK:                 %[[VAL_222:.*]] = arith.select %[[VAL_220]], %[[VAL_221]], %[[VAL_203]] : index
-// CHECK:                 scf.yield %[[VAL_222]], %[[VAL_219]] : index, i1
+// CHECK:                 %[[VAL_198:.*]] = arith.ori %[[VAL_194]], %[[VAL_183]] : i1
+// CHECK:                 %[[VAL_199:.*]] = arith.cmpi ult, %[[VAL_200:.*]], %[[VAL_182]] : index
+// CHECK:                 %[[VAL_201:.*]] = arith.select %[[VAL_199]], %[[VAL_200]], %[[VAL_182]] : index
+// CHECK:                 scf.yield %[[VAL_201]], %[[VAL_198]] : index, i1
 // CHECK:               }
-// CHECK:               %[[VAL_223:.*]] = arith.addi %[[VAL_224:.*]]#0, %[[VAL_5]] : index
-// CHECK:               %[[VAL_225:.*]] = arith.subi %[[VAL_223]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_226:.*]] = arith.cmpi uge, %[[VAL_223]], %[[VAL_3]] : index
-// CHECK:               %[[VAL_227:.*]] = arith.select %[[VAL_226]], %[[VAL_225]], %[[VAL_4]] : index
-// CHECK:               scf.yield %[[VAL_224]]#0, %[[VAL_224]]#1, %[[VAL_227]] : index, i1, index
+// CHECK:               %[[VAL_202:.*]] = arith.addi %[[VAL_203:.*]]#0, %[[VAL_9]] : index
+// CHECK:               %[[VAL_204:.*]] = arith.addi %[[VAL_203]]#0, %[[VAL_3]] : index
+// CHECK:               %[[VAL_205:.*]] = arith.cmpi uge, %[[VAL_202]], %[[VAL_5]] : index
+// CHECK:               %[[VAL_206:.*]] = arith.select %[[VAL_205]], %[[VAL_204]], %[[VAL_8]] : index
+// CHECK:               scf.yield %[[VAL_203]]#0, %[[VAL_203]]#1, %[[VAL_206]] : index, i1, index
 // CHECK:             }
-// CHECK:             %[[VAL_228:.*]] = arith.addi %[[VAL_35]], %[[VAL_5]] : index
-// CHECK:             %[[VAL_229:.*]] = arith.cmpi ugt, %[[VAL_230:.*]]#2, %[[VAL_228]] : index
-// CHECK:             %[[VAL_231:.*]] = arith.select %[[VAL_229]], %[[VAL_230]]#2, %[[VAL_228]] : index
-// CHECK:             %[[VAL_232:.*]] = arith.addi %[[VAL_231]], %[[VAL_3]] : index
-// CHECK:             %[[VAL_233:.*]] = arith.cmpi ule, %[[VAL_232]], %[[VAL_2]] : index
-// CHECK:             %[[VAL_234:.*]] = arith.andi %[[VAL_230]]#1, %[[VAL_233]] : i1
-// CHECK:             scf.yield %[[VAL_234]], %[[VAL_230]]#0, %[[VAL_231]], %[[VAL_235:.*]]#3 : i1, index, index, tensor<6x6xi32, #{{.*}}>
-// CHECK:           } attributes {"Emitted from" = "linalg.generic"}
-// CHECK:           %[[VAL_236:.*]] = sparse_tensor.load %[[VAL_237:.*]]#3 hasInserts : tensor<6x6xi32, #{{.*}}>
-// CHECK:           return %[[VAL_236]] : tensor<6x6xi32, #{{.*}}>
+// CHECK:             %[[VAL_207:.*]] = arith.addi %[[VAL_33]], %[[VAL_9]] : index
+// CHECK:             %[[VAL_208:.*]] = arith.cmpi ugt, %[[VAL_209:.*]]#2, %[[VAL_207]] : index
+// CHECK:             %[[VAL_210:.*]] = arith.select %[[VAL_208]], %[[VAL_209]]#2, %[[VAL_207]] : index
+// CHECK:             %[[VAL_211:.*]] = arith.addi %[[VAL_210]], %[[VAL_5]] : index
+// CHECK:             %[[VAL_212:.*]] = arith.cmpi ule, %[[VAL_211]], %[[VAL_4]] : index
+// CHECK:             %[[VAL_213:.*]] = arith.andi %[[VAL_209]]#1, %[[VAL_212]] : i1
+// CHECK:             scf.yield %[[VAL_213]], %[[VAL_209]]#0, %[[VAL_210]], %[[VAL_214:.*]]#2 : i1, index, index, tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK:           }
+// CHECK:           %[[VAL_215:.*]] = sparse_tensor.load %[[VAL_216:.*]]#2 hasInserts : tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK:           return %[[VAL_215]] : tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK:         }
 func.func @conv2d_all_sparse_CSR(%arg0: tensor<8x8xi32, #DCSR>,
                                  %arg1: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {


        

