[Mlir-commits] [mlir] a1f55bd - Adopt Properties to store operations' inherent Attributes in TOSA

Mehdi Amini llvmlistbot at llvm.org
Tue May 2 20:57:04 PDT 2023


Author: Mehdi Amini
Date: 2023-05-02T20:56:30-07:00
New Revision: a1f55bd33a08cb2d233a5dd50c9f4126dac17300

URL: https://github.com/llvm/llvm-project/commit/a1f55bd33a08cb2d233a5dd50c9f4126dac17300
DIFF: https://github.com/llvm/llvm-project/commit/a1f55bd33a08cb2d233a5dd50c9f4126dac17300.diff

LOG: Adopt Properties to store operations' inherent Attributes in TOSA

This is part of an ongoing migration to adopt Properties inside MLIR: inherent attributes move out of the operation's attribute dictionary and into a dedicated, typed Properties storage on each operation.

Differential Revision: https://reviews.llvm.org/D148296
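
For readers following the migration, here is a minimal sketch of the pattern applied to every TOSA shape-inference hook below (FooOp and getSize() are placeholders mirroring the tosa.slice change, not code from this patch): with usePropertiesForAttributes = 1 set on the dialect, ODS stores the inherent attributes in a generated Properties struct, and the op adaptor is built from the OpaqueProperties handle in addition to the attribute dictionary.

    // Illustrative sketch only; assumes the usual TosaOps.cpp includes and
    // helpers such as convertToMlirShape().
    LogicalResult FooOp::inferReturnTypeComponents(
        MLIRContext *context, std::optional<Location> location,
        ValueShapeRange operands, DictionaryAttr attributes,
        OpaqueProperties properties, RegionRange regions,
        SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
      // Before this patch: FooOp::Adaptor adaptor(operands, attributes);
      // The inherent attributes now live in the generated Properties struct,
      // which is recovered from the opaque handle and handed to the adaptor.
      FooOp::Adaptor adaptor(operands, attributes,
                             *properties.as<FooOp::Properties *>(), regions);
      // Attribute accessors on the adaptor are unchanged.
      inferredReturnShapes.push_back(
          ShapedTypeComponents(convertToMlirShape(adaptor.getSize())));
      return success();
    }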

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
    mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
    mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
    mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
    mlir/test/Dialect/Tosa/broadcast.mlir
    mlir/test/Dialect/Tosa/canonicalize.mlir
    mlir/test/Dialect/Tosa/constant-op-fold.mlir
    mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
    mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
    mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
    mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
index 110a334011021..7e4d67575fdf4 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOpBase.td
@@ -45,6 +45,7 @@ def Tosa_Dialect : Dialect {
   let cppNamespace = "mlir::tosa";
   let hasConstantMaterializer = 1;
   let useDefaultAttributePrinterParser = 1;
+  let usePropertiesForAttributes = 1;
 }
 
 //===----------------------------------------------------------------------===//

diff  --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index aec98e256bf94..5c17d281c2ec7 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -637,7 +637,9 @@ LogicalResult tosa::SliceOp::inferReturnTypeComponents(
     OpaqueProperties properties, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   inferredReturnShapes.push_back(ShapedTypeComponents(
-      convertToMlirShape(SliceOpAdaptor(operands, attributes).getSize())));
+      convertToMlirShape(SliceOpAdaptor(operands, attributes,
+                                        *properties.as<Properties *>(), regions)
+                             .getSize())));
   return success();
 }
 
@@ -663,7 +665,8 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
     ValueShapeRange operands, DictionaryAttr attributes,
     OpaqueProperties properties, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-  TileOpAdaptor adaptor(operands, attributes);
+  TileOpAdaptor adaptor(operands, attributes, *properties.as<Properties *>(),
+                        regions);
   ArrayRef<int64_t> multiples = adaptor.getMultiples();
   ShapeAdaptor inputShape = operands.getShape(0);
   SmallVector<int64_t> outputShape;
@@ -697,7 +700,8 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
     ValueShapeRange operands, DictionaryAttr attributes,
     OpaqueProperties properties, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-  ReshapeOpAdaptor adaptor(operands, attributes);
+  ReshapeOpAdaptor adaptor(operands, attributes, *properties.as<Properties *>(),
+                           regions);
   ShapeAdaptor inputShape = operands.getShape(0);
   Type inputType = getElementTypeOrSelf(operands.getType()[0]);
   llvm::SmallVector<int64_t> newShapeValue =
@@ -861,7 +865,8 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
     ValueShapeRange operands, DictionaryAttr attributes,
     OpaqueProperties properties, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-  ResizeOpAdaptor adaptor(operands, attributes);
+  ResizeOpAdaptor adaptor(operands, attributes, *properties.as<Properties *>(),
+                          regions);
   llvm::SmallVector<int64_t, 4> outputShape;
   outputShape.resize(4, ShapedType::kDynamic);
 
@@ -1083,7 +1088,8 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
     OpaqueProperties properties, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
-  Conv2DOp::Adaptor adaptor(operands.getValues(), attributes);
+  Conv2DOp::Adaptor adaptor(operands, attributes,
+                            *properties.as<Properties *>(), regions);
 
   int64_t inputWidth = ShapedType::kDynamic;
   int64_t inputHeight = ShapedType::kDynamic;
@@ -1147,7 +1153,8 @@ LogicalResult Conv3DOp::inferReturnTypeComponents(
     OpaqueProperties properties, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamic);
-  Conv3DOp::Adaptor adaptor(operands.getValues(), attributes);
+  Conv3DOp::Adaptor adaptor(operands, attributes,
+                            *properties.as<Properties *>(), regions);
 
   int64_t inputWidth = ShapedType::kDynamic;
   int64_t inputHeight = ShapedType::kDynamic;
@@ -1237,7 +1244,8 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
     OpaqueProperties properties, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
   llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
-  DepthwiseConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
+  DepthwiseConv2DOp::Adaptor adaptor(operands, attributes,
+                                     *properties.as<Properties *>(), regions);
 
   int64_t inputWidth = ShapedType::kDynamic;
   int64_t inputHeight = ShapedType::kDynamic;
@@ -1313,7 +1321,8 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
     ValueShapeRange operands, DictionaryAttr attributes,
     OpaqueProperties properties, RegionRange regions,
     SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
-  TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
+  TransposeConv2DOp::Adaptor adaptor(operands, attributes,
+                                     *properties.as<Properties *>(), regions);
   // outputShape is mutable.
   llvm::SmallVector<int64_t> outputShape =
       convertToMlirShape(adaptor.getOutShape());

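The test updates below are mechanical: once the inherent attributes are stored as Properties, the generic operation syntax prints them in a <{...}> properties segment ahead of the discardable {...} attribute dictionary, so every FileCheck pattern matching the old spelling is updated. A before/after example in the spirit of the broadcast tests (shapes are illustrative):

    // Old generic form: inherent attribute in the trailing attribute dictionary.
    %a = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 15, 14>} : (tensor<15x14xf32>) -> tensor<1x1x15x14xf32>
    // New generic form: inherent attribute in the <{...}> properties segment.
    %b = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 15, 14>}> : (tensor<15x14xf32>) -> tensor<1x1x15x14xf32>
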
diff  --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
index 5ef44d27be61d..838e957ae16b9 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -318,7 +318,7 @@ func.func @avg_pool_i8(%arg0: tensor<1x6x34x62xi8>) -> (tensor<1x5x33x62xi8>) {
   // CHECK: %[[TRUNC_SHIFT:.+]] = arith.trunci %[[SUB]]
   // CHECK: %[[C30:.+]] = arith.constant 30
   // CHECK: %[[SHIFT:.+]] = arith.addi %[[TRUNC_SHIFT]], %[[C30]] : i8
-  // CHECK: %[[SCALED:.+]] = "tosa.apply_scale"(%[[IN]], %[[TRUNC_MUL]], %[[SHIFT]]) {double_round = false}
+  // CHECK: %[[SCALED:.+]] = "tosa.apply_scale"(%[[IN]], %[[TRUNC_MUL]], %[[SHIFT]]) <{double_round = false}>
 
   // Perform the normalization.
   // CHECK: %[[CMIN:.+]] = arith.constant -128

diff  --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
index e9e9037ebcdb5..3e654ab9c56b0 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -137,8 +137,8 @@ func.func @test_broadcast_swapped_args(%arg0: tensor<2xf32>, %arg1: tensor<1xf32
 // CHECK-SAME:  %[[ARG1:[0-9a-zA-Z_]*]]
 func.func @test_multibroadcast(%arg0: tensor<1x3xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x3xf32> {
   // CHECK: [[INIT:%.+]] = tensor.empty() : tensor<2x3xf32>
-  // CHECK: [[RESHAPE1:%.+]] = "tosa.reshape"(%[[ARG0]]) {new_shape = array<i64: 3>}
-  // CHECK: [[RESHAPE2:%.+]] = "tosa.reshape"(%[[ARG1]]) {new_shape = array<i64: 2>}
+  // CHECK: [[RESHAPE1:%.+]] = "tosa.reshape"(%[[ARG0]]) <{new_shape = array<i64: 3>}
+  // CHECK: [[RESHAPE2:%.+]] = "tosa.reshape"(%[[ARG1]]) <{new_shape = array<i64: 2>}
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP0]]], iterator_types = ["parallel", "parallel"]} ins([[RESHAPE1]], [[RESHAPE2]] : tensor<3xf32>, tensor<2xf32>) outs([[INIT]] : tensor<2x3xf32>) {
   // CHECK: ^bb0(%[[ARG2:.*]]: f32, %[[ARG3:.*]]: f32, %[[ARG4:.*]]: f32):
   // CHECK:   [[ELEMENT:%.+]] = arith.addf %[[ARG2]], %[[ARG3]] : f32
@@ -833,7 +833,7 @@ func.func @rescale_i8(%arg0 : tensor<2xi8>) -> () {
   // CHECK: [[C22:%.+]] = arith.constant 22
   // CHECK-DAG: [[IN32:%.+]] = arith.extsi [[IN]]
   // CHECK-DAG: [[IN_ZEROED:%.+]] = arith.subi [[IN32]], [[C17]]
-  // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) {double_round = false}
+  // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) <{double_round = false}
   // CHECK-DAG: [[SCALED_ZEROED:%.+]] = arith.addi [[SCALED]], [[C22]]
   // CHECK-DAG: [[CMIN:%.+]] = arith.constant -128
   // CHECK-DAG: [[CMAX:%.+]] = arith.constant 127
@@ -854,7 +854,7 @@ func.func @rescale_i8(%arg0 : tensor<2xi8>) -> () {
   // CHECK: [[C22:%.+]] = arith.constant 22
   // CHECK-DAG: [[IN32:%.+]] = arith.extsi [[IN]]
   // CHECK-DAG: [[IN_ZEROED:%.+]] = arith.subi [[IN32]], [[C17]]
-  // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) {double_round = false}
+  // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) <{double_round = false}
   // CHECK-DAG: [[SCALED_ZEROED:%.+]] = arith.addi [[SCALED]], [[C22]]
   // CHECK-DAG: [[CMIN:%.+]] = arith.constant 0
   // CHECK-DAG: [[CMAX:%.+]] = arith.constant 255
@@ -927,7 +927,7 @@ func.func @rescale_ui8(%arg0 : tensor<2xui8>) -> () {
   // CHECK-DAG: [[CAST:%.+]] = builtin.unrealized_conversion_cast [[IN]] : ui8 to i8
   // CHECK-DAG: [[IN32:%.+]] = arith.extui [[CAST]]
   // CHECK-DAG: [[IN_ZEROED:%.+]] = arith.subi [[IN32]], [[C17]]
-  // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) {double_round = false}
+  // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[C0]], [[C1]]) <{double_round = false}
   // CHECK-DAG: [[SCALED_ZEROED:%.+]] = arith.addi [[SCALED]], [[C22]]
   // CHECK-DAG: [[CMIN:%.+]] = arith.constant -128
   // CHECK-DAG: [[CMAX:%.+]] = arith.constant 127
@@ -959,7 +959,7 @@ func.func @rescale_per_channel(%arg0 : tensor<3xi8>) -> (tensor<3xi8>) {
 
   // CHECK-DAG: [[IN32:%.+]] = arith.extsi [[IN]]
   // CHECK-DAG: [[IN_ZEROED:%.+]] = arith.subi [[IN32]], [[C243]]
-  // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[MULTIPLIER]], [[SHIFT]]) {double_round = false}
+  // CHECK-DAG: [[SCALED:%.+]] = "tosa.apply_scale"([[IN_ZEROED]], [[MULTIPLIER]], [[SHIFT]]) <{double_round = false}
   // CHECK-DAG: [[SCALED_ZEROED:%.+]] = arith.addi [[SCALED]], [[C252]]
   // CHECK-DAG: [[CMIN:%.+]] = arith.constant -128
   // CHECK-DAG: [[CMAX:%.+]] = arith.constant 127
@@ -1065,21 +1065,21 @@ func.func @tile(%arg0 : tensor<2x3xi8>) -> () {
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<2x2x1x3xi8>)
   // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8
   // CHECK:   linalg.yield %[[ARG1]] : i8
-  // CHECK: "tosa.reshape"([[GENERIC]]) {new_shape = array<i64: 4, 3>}
+  // CHECK: "tosa.reshape"([[GENERIC]]) <{new_shape = array<i64: 4, 3>}
   %0 = "tosa.tile"(%arg0) {multiples = array<i64: 2, 1>} : (tensor<2x3xi8>)  -> (tensor<4x3xi8>)
 
   // CHECK: [[INIT:%.+]] = tensor.empty()
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<1x2x2x3xi8>)
   // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8
   // CHECK:   linalg.yield %[[ARG1]] : i8
-  // CHECK: "tosa.reshape"([[GENERIC]]) {new_shape = array<i64: 2, 6>}
+  // CHECK: "tosa.reshape"([[GENERIC]]) <{new_shape = array<i64: 2, 6>}
   %1 = "tosa.tile"(%arg0) {multiples = array<i64: 1, 2>} : (tensor<2x3xi8>)  -> (tensor<2x6xi8>)
 
   // CHECK: [[INIT:%.+]] = tensor.empty()
   // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs([[INIT]] : tensor<5x2x7x3xi8>)
   // CHECK: ^bb0(%[[ARG1:[0-9a-zA-Z_]+]]: i8
   // CHECK:   linalg.yield %[[ARG1]] : i8
-  // CHECK: "tosa.reshape"([[GENERIC]]) {new_shape = array<i64: 10, 21>}
+  // CHECK: "tosa.reshape"([[GENERIC]]) <{new_shape = array<i64: 10, 21>}
   %2 = "tosa.tile"(%arg0) {multiples = array<i64: 5, 7>} : (tensor<2x3xi8>)  -> (tensor<10x21xi8>)
 
   return
@@ -1099,7 +1099,7 @@ func.func @tile_dyn_input(%arg0 : tensor<?x3xi8>) -> () {
   // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<?x3xi8>) outs(%[[INIT]] : tensor<2x?x1x3xi8>)
   // CHECK: ^bb0(%[[ARG1:.+]]: i8,
   // CHECK:   linalg.yield %[[ARG1]] : i8
-  // CHECK: "tosa.reshape"(%[[GENERIC]]) {new_shape = array<i64: -9223372036854775808, 3>}
+  // CHECK: "tosa.reshape"(%[[GENERIC]]) <{new_shape = array<i64: -9223372036854775808, 3>}
   %0 = "tosa.tile"(%arg0) {multiples = array<i64: 2, 1>} : (tensor<?x3xi8>)  -> (tensor<?x3xi8>)
 
   return
@@ -1119,7 +1119,7 @@ func.func @tile_dyn_multiples(%arg0 : tensor<2x3xi8>) -> () {
   // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[ARG0]] : tensor<2x3xi8>) outs(%[[INIT]] : tensor<2x2x?x3xi8>)
   // CHECK: ^bb0(%[[ARG1:.+]]: i8,
   // CHECK:   linalg.yield %[[ARG1]] : i8
-  // CHECK: "tosa.reshape"(%[[GENERIC]]) {new_shape = array<i64: 2, -9223372036854775808>}
+  // CHECK: "tosa.reshape"(%[[GENERIC]]) <{new_shape = array<i64: 2, -9223372036854775808>}
   %0 = "tosa.tile"(%arg0) {multiples = array<i64: 2, -1>} : (tensor<2x3xi8>)  -> (tensor<2x?xi8>)
 
   return

diff  --git a/mlir/test/Dialect/Tosa/broadcast.mlir b/mlir/test/Dialect/Tosa/broadcast.mlir
index ed1cd1e17b24d..282c386294027 100644
--- a/mlir/test/Dialect/Tosa/broadcast.mlir
+++ b/mlir/test/Dialect/Tosa/broadcast.mlir
@@ -11,7 +11,7 @@ func.func @test_broadcast0(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor
 // -----
 // CHECK-LABEL: broadcast1
 func.func @test_broadcast1(%arg0: tensor<1xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x1xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1)
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<1xf32>, tensor<2x1xf32>) -> tensor<2x1xf32>
   return %0 : tensor<2x1xf32>
@@ -20,7 +20,7 @@ func.func @test_broadcast1(%arg0: tensor<1xf32>, %arg1: tensor<2x1xf32>) -> tens
 // -----
 // CHECK-LABEL: broadcast2
 func.func @test_broadcast2(%arg0: tensor<2x1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<2x1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
   return %0 : tensor<2x1xf32>
@@ -29,7 +29,7 @@ func.func @test_broadcast2(%arg0: tensor<2x1xf32>, %arg1: tensor<1xf32>) -> tens
 // -----
 // CHECK-LABEL: broadcast3
 func.func @test_broadcast3(%arg0: tensor<2x1x1x1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1x1x1xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<2x1x1x1xf32>, tensor<1xf32>) -> tensor<2x1x1x1xf32>
   return %0 : tensor<2x1x1x1xf32>
@@ -38,7 +38,7 @@ func.func @test_broadcast3(%arg0: tensor<2x1x1x1xf32>, %arg1: tensor<1xf32>) ->
 // -----
 // CHECK-LABEL: broadcast4
 func.func @test_broadcast4(%arg0: tensor<1x1x1x2xf32>, %arg1: tensor<1xf32>) -> tensor<1x1x1x2xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<1x1x1x2xf32>, tensor<1xf32>) -> tensor<1x1x1x2xf32>
   return %0 : tensor<1x1x1x2xf32>
@@ -47,7 +47,7 @@ func.func @test_broadcast4(%arg0: tensor<1x1x1x2xf32>, %arg1: tensor<1xf32>) ->
 // -----
 // CHECK-LABEL: broadcast5
 func.func @test_broadcast5(%arg0: tensor<1x1x2x1xf32>, %arg1: tensor<1xf32>) -> tensor<1x1x2x1xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<1x1x2x1xf32>, tensor<1xf32>) -> tensor<1x1x2x1xf32>
   return %0 : tensor<1x1x2x1xf32>
@@ -56,7 +56,7 @@ func.func @test_broadcast5(%arg0: tensor<1x1x2x1xf32>, %arg1: tensor<1xf32>) ->
 // -----
 // CHECK-LABEL: broadcast6
 func.func @test_broadcast6(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<1xf32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x15x14xf32>, tensor<1xf32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -65,7 +65,7 @@ func.func @test_broadcast6(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<1xf32>)
 // -----
 // CHECK-LABEL: broadcast7
 func.func @test_broadcast7(%arg0: tensor<17x16x1x14xf32>, %arg1: tensor<1x1xf32>) -> tensor<17x16x1x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x1x14xf32>, tensor<1x1xf32>) -> tensor<17x16x1x14xf32>
   return %0 : tensor<17x16x1x14xf32>
@@ -74,7 +74,7 @@ func.func @test_broadcast7(%arg0: tensor<17x16x1x14xf32>, %arg1: tensor<1x1xf32>
 // -----
 // CHECK-LABEL: broadcast8
 func.func @test_broadcast8(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<1x1xf32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x15x14xf32>, tensor<1x1xf32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -83,7 +83,7 @@ func.func @test_broadcast8(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<1x1xf32
 // -----
 // CHECK-LABEL: broadcast9
 func.func @test_broadcast9(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<15x1xf32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 15, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 15, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x15x14xf32>, tensor<15x1xf32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -92,7 +92,7 @@ func.func @test_broadcast9(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<15x1xf3
 // -----
 // CHECK-LABEL: broadcast10
 func.func @test_broadcast10(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<15x14xf32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 15, 14>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 15, 14>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<17x16x15x14xf32>, tensor<15x14xf32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -101,7 +101,7 @@ func.func @test_broadcast10(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<15x14x
 // -----
 // CHECK-LABEL: broadcast13
 func.func @test_broadcast13(%arg0: tensor<1xf32>, %arg1: tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1)
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -110,7 +110,7 @@ func.func @test_broadcast13(%arg0: tensor<1xf32>, %arg1: tensor<17x16x15x14xf32>
 // -----
 // CHECK-LABEL: broadcast14
 func.func @test_broadcast14(%arg0: tensor<1x1xf32>, %arg1: tensor<17x16x1x14xf32>) -> tensor<17x16x1x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1)
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<1x1xf32>, tensor<17x16x1x14xf32>) -> tensor<17x16x1x14xf32>
   return %0 : tensor<17x16x1x14xf32>
@@ -119,7 +119,7 @@ func.func @test_broadcast14(%arg0: tensor<1x1xf32>, %arg1: tensor<17x16x1x14xf32
 // -----
 // CHECK-LABEL: broadcast15
 func.func @test_broadcast15(%arg0: tensor<1x1xf32>, %arg1: tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1)
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<1x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -128,7 +128,7 @@ func.func @test_broadcast15(%arg0: tensor<1x1xf32>, %arg1: tensor<17x16x15x14xf3
 // -----
 // CHECK-LABEL: broadcast16
 func.func @test_broadcast16(%arg0: tensor<15x1xf32>, %arg1: tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 15, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 15, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1)
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -137,7 +137,7 @@ func.func @test_broadcast16(%arg0: tensor<15x1xf32>, %arg1: tensor<17x16x15x14xf
 // -----
 // CHECK-LABEL: broadcast17
 func.func @test_broadcast17(%arg0: tensor<15x14xf32>, %arg1: tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 15, 14>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 15, 14>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1)
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<15x14xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -154,7 +154,7 @@ func.func @test_broadcast18(%arg0: tensor<14x1xf32>, %arg1: tensor<1x15xf32>) ->
 // -----
 // CHECK-LABEL: broadcast19
 func.func @test_broadcast19(%arg0: tensor<64x64x1xf32>, %arg1: tensor<1x17xf32>) -> (tensor<64x64x17xf32> ) {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 17>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 17>}
   // CHECK: %[[VAR1:.*]] = "tosa.sub"(%arg0, %[[VAR0]])
   %0 = "tosa.sub"(%arg0, %arg1) : (tensor<64x64x1xf32>, tensor<1x17xf32>) -> tensor<64x64x17xf32>
   return %0 : tensor<64x64x17xf32>
@@ -163,7 +163,7 @@ func.func @test_broadcast19(%arg0: tensor<64x64x1xf32>, %arg1: tensor<1x17xf32>)
 // -----
 // CHECK-LABEL: broadcast20
 func.func @test_broadcast20(%arg0: tensor<3x3x4x1xf32>, %arg1: tensor<4x5xf32>) -> (tensor<3x3x4x5xf32> ) {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 4, 5>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 4, 5>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%arg0, %[[VAR0]])
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<3x3x4x1xf32>, tensor<4x5xf32>) -> tensor<3x3x4x5xf32>
   return %0 : tensor<3x3x4x5xf32>
@@ -172,25 +172,25 @@ func.func @test_broadcast20(%arg0: tensor<3x3x4x1xf32>, %arg1: tensor<4x5xf32>)
 // -----
 // CHECK-LABEL: broadcast_mul
 func.func @test_broadcast_mul(%arg0: tensor<15x14xi32>, %arg1: tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 15, 14>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 15, 14>}
   // CHECK: %[[VAR1:.*]] = "tosa.mul"(%[[VAR0]], %arg1)
-  %0 = "tosa.mul"(%arg0, %arg1) {shift = 1 : i32 } : (tensor<15x14xi32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32>
+  %0 = "tosa.mul"(%arg0, %arg1) <{shift = 1 : i32 }> : (tensor<15x14xi32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32>
   return %0 : tensor<17x16x15x14xi32>
 }
 
 // -----
 // CHECK-LABEL: broadcast_arithmetic_right_shift
 func.func @test_broadcast_arithmetic_right_shift(%arg0: tensor<15x14xi32>, %arg1: tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 15, 14>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 15, 14>}
   // CHECK: %[[VAR1:.*]] = "tosa.arithmetic_right_shift"(%[[VAR0]], %arg1)
-  %0 = "tosa.arithmetic_right_shift"(%arg0, %arg1) { round = true } : (tensor<15x14xi32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32>
+  %0 = "tosa.arithmetic_right_shift"(%arg0, %arg1) <{ round = true }> : (tensor<15x14xi32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32>
   return %0 : tensor<17x16x15x14xi32>
 }
 
 // -----
 // CHECK-LABEL: broadcast_scalar
 func.func @test_broadcast_scalar(%arg0: tensor<i32>, %arg1: tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32> {
-  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAR1:.*]] = "tosa.add"(%[[VAR0]], %arg1)
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<i32>, tensor<17x16x15x14xi32>) -> tensor<17x16x15x14xi32>
   return %0 : tensor<17x16x15x14xi32>
@@ -199,8 +199,8 @@ func.func @test_broadcast_scalar(%arg0: tensor<i32>, %arg1: tensor<17x16x15x14xi
 // -----
 // CHECK-LABEL: broadcast_select_both_input
 func.func @test_broadcast_select_both_input(%arg0: tensor<1x16x16xi1>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<1x16x16xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1>}
-  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) {new_shape = array<i64: 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array<i64: 1, 1, 1>}
   // CHECK: %[[VAL_2:.*]] = "tosa.select"(%arg0, %[[VAL_0]], %[[VAL_1]])
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x16x16xi1>, tensor<f32>, tensor<f32>) -> tensor<1x16x16xf32>
   return %0 : tensor<1x16x16xf32>
@@ -209,7 +209,7 @@ func.func @test_broadcast_select_both_input(%arg0: tensor<1x16x16xi1>, %arg1: te
 // -----
 // CHECK-LABEL: broadcast_select_one_input
 func.func @test_broadcast_select_one_input(%arg0: tensor<17x16x15x14xi1>, %arg1: tensor<17x16x15x14xf32>, %arg2: tensor<f32>) -> tensor<17x16x15x14xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg2) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg2) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAL_1:.*]] = "tosa.select"(%arg0, %arg1, %[[VAL_0]])
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<17x16x15x14xi1>, tensor<17x16x15x14xf32>, tensor<f32>) -> tensor<17x16x15x14xf32>
   return %0 : tensor<17x16x15x14xf32>
@@ -218,7 +218,7 @@ func.func @test_broadcast_select_one_input(%arg0: tensor<17x16x15x14xi1>, %arg1:
 // -----
 // CHECK-LABEL: broadcast_select_predicate
 func.func @test_broadcast_select_predicate(%arg0: tensor<i1>, %arg1: tensor<1x32x32x8xf32>, %arg2: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAL_1:.*]] = "tosa.select"(%[[VAL_0]], %arg1, %arg2)
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<i1>, tensor<1x32x32x8xf32>, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
   return %0 : tensor<1x32x32x8xf32>
@@ -227,8 +227,8 @@ func.func @test_broadcast_select_predicate(%arg0: tensor<i1>, %arg1: tensor<1x32
 // -----
 // CHECK-LABEL: broadcast_select_abc
 func.func @test_broadcast_select_abc(%arg0: tensor<i1>, %arg1: tensor<32x8xf32>, %arg2: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 1, 1>}
-  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 32, 8>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 32, 8>}
   // CHECK: %[[VAL_2:.*]] = "tosa.select"(%[[VAL_0]], %[[VAL_1]], %arg2)
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<i1>, tensor<32x8xf32>, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
   return %0 : tensor<1x32x32x8xf32>
@@ -237,8 +237,8 @@ func.func @test_broadcast_select_abc(%arg0: tensor<i1>, %arg1: tensor<32x8xf32>,
 // -----
 // CHECK-LABEL: broadcast_select_acb
 func.func @test_broadcast_select_acb(%arg0: tensor<i1>, %arg1: tensor<1x32x32x8xf32>, %arg2: tensor<32x8xf32>) -> tensor<1x32x32x8xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 1, 1>}
-  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) {new_shape = array<i64: 1, 1, 32, 8>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array<i64: 1, 1, 32, 8>}
   // CHECK: %[[VAL_2:.*]] = "tosa.select"(%[[VAL_0]], %arg1, %[[VAL_1]])
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<i1>, tensor<1x32x32x8xf32>, tensor<32x8xf32>) -> tensor<1x32x32x8xf32>
   return %0 : tensor<1x32x32x8xf32>
@@ -247,8 +247,8 @@ func.func @test_broadcast_select_acb(%arg0: tensor<i1>, %arg1: tensor<1x32x32x8x
 // -----
 // CHECK-LABEL: broadcast_select_bac
 func.func @test_broadcast_select_bac(%arg0: tensor<32x8xi1>, %arg1: tensor<f32>, %arg2: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 32, 8>}
-  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 32, 8>}
+  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAL_2:.*]] = "tosa.select"(%[[VAL_0]], %[[VAL_1]], %arg2)
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<32x8xi1>, tensor<f32>, tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32>
   return %0 : tensor<1x32x32x8xf32>
@@ -257,8 +257,8 @@ func.func @test_broadcast_select_bac(%arg0: tensor<32x8xi1>, %arg1: tensor<f32>,
 // -----
 // CHECK-LABEL: broadcast_select_bca
 func.func @test_broadcast_select_bca(%arg0: tensor<32x8xi1>, %arg1: tensor<1x32x32x8xf32>, %arg2: tensor<i1>) -> tensor<1x32x32x8xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 1, 32, 8>}
-  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 1, 32, 8>}
+  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAL_2:.*]] = "tosa.select"(%[[VAL_0]], %arg1, %[[VAL_1]])
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<32x8xi1>, tensor<1x32x32x8xf32>, tensor<i1>) -> tensor<1x32x32x8xf32>
   return %0 : tensor<1x32x32x8xf32>
@@ -267,8 +267,8 @@ func.func @test_broadcast_select_bca(%arg0: tensor<32x8xi1>, %arg1: tensor<1x32x
 // -----
 // CHECK-LABEL: broadcast_select_cab
 func.func @test_broadcast_select_cab(%arg0: tensor<1x32x32x8xi1>, %arg1: tensor<f32>, %arg2: tensor<32x8xf32>) -> tensor<1x32x32x8xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 1, 1>}
-  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) {new_shape = array<i64: 1, 1, 32, 8>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array<i64: 1, 1, 32, 8>}
   // CHECK: %[[VAL_2:.*]] = "tosa.select"(%arg0, %[[VAL_0]], %[[VAL_1]])
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x32x32x8xi1>, tensor<f32>, tensor<32x8xf32>) -> tensor<1x32x32x8xf32>
   return %0 : tensor<1x32x32x8xf32>
@@ -277,8 +277,8 @@ func.func @test_broadcast_select_cab(%arg0: tensor<1x32x32x8xi1>, %arg1: tensor<
 // -----
 // CHECK-LABEL: broadcast_select_cba
 func.func @test_broadcast_select_cba(%arg0: tensor<1x32x32x8xi1>, %arg1: tensor<32x8xf32>, %arg2: tensor<i1>) -> tensor<1x32x32x8xf32> {
-  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 1, 1, 32, 8>}
-  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) {new_shape = array<i64: 1, 1, 1, 1>}
+  // CHECK-DAG: %[[VAL_0:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 1, 1, 32, 8>}
+  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.reshape"(%arg2) <{new_shape = array<i64: 1, 1, 1, 1>}
   // CHECK: %[[VAL_2:.*]] = "tosa.select"(%arg0, %[[VAL_0]], %[[VAL_1]])
   %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x32x32x8xi1>, tensor<32x8xf32>, tensor<i1>) -> tensor<1x32x32x8xf32>
   return %0 : tensor<1x32x32x8xf32>

diff  --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
index eacff38ebeabf..adcb16db29c1a 100644
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -80,7 +80,7 @@ func.func @clamp_uint8_is_noop(%arg0: tensor<4xui8>) -> tensor<4xui8> {
 
 // CHECK-LABEL: @clamp_twice_is_single_clamp
 func.func @clamp_twice_is_single_clamp(%arg0: tensor<4xi8>) -> tensor<4xi8> {
-  // CHECK: "tosa.clamp"(%arg0) {max_fp = 3.000000e+00 : f32, max_int = 2 : i64, min_fp = -3.000000e+00 : f32, min_int = -2 : i64}
+  // CHECK: "tosa.clamp"(%arg0) <{max_fp = 3.000000e+00 : f32, max_int = 2 : i64, min_fp = -3.000000e+00 : f32, min_int = -2 : i64}
   %0 = "tosa.clamp"(%arg0) {max_fp = 3.0 : f32, max_int = 4 : i64, min_fp = -5.0 : f32, min_int = -2 : i64} :  (tensor<4xi8>) -> tensor<4xi8>
   %1 = "tosa.clamp"(%0) {max_fp = 5.0 : f32, max_int = 2 : i64, min_fp = -3.0 : f32, min_int = -4 : i64} :  (tensor<4xi8>) -> tensor<4xi8>
   return %1 : tensor<4xi8>
@@ -151,7 +151,7 @@ func.func @pad_noop(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> {
 
 // CHECK-LABEL: @pad_determine_val_i32
 func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xi32> {
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0> : tensor<i32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<i32>}
   // CHECK: "tosa.pad"(%arg0, %arg1, %[[ZERO]])
   %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
   %1 = "tosa.pad"(%arg0, %arg1) : (tensor<?x?xi32>, tensor<2x2xi32>) -> tensor<?x?xi32>
@@ -160,7 +160,7 @@ func.func @pad_determine_val_i32(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>
 
 // CHECK-LABEL: @pad_determine_val_f32
 func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xf32> {
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0.000000e+00> : tensor<f32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<f32>}
   // CHECK: "tosa.pad"(%arg0, %arg1, %[[ZERO]])
   %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
   %1 = "tosa.pad"(%arg0, %arg1) : (tensor<?x?xf32>, tensor<2x2xi32>) -> tensor<?x?xf32>
@@ -169,7 +169,7 @@ func.func @pad_determine_val_f32(%arg0: tensor<?x?xf32>, %arg1 : tensor<2x2xi32>
 
 // CHECK-LABEL: @pad_determine_val_quant
 func.func @pad_determine_val_quant(%arg0: tensor<?x?xi32>, %arg1 : tensor<2x2xi32>) -> tensor<?x?xi32> {
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<42> : tensor<i32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<42> : tensor<i32>}
   // CHECK: "tosa.pad"(%arg0, %arg1, %[[ZERO]])
   %0 = "tosa.const"() { value = dense<[[1, 0], [0, 1]]> : tensor<2x2xi32>} : () -> tensor<2x2xi32>
   %1 = "tosa.pad"(%arg0, %arg1) {quantization_info = #tosa.pad_quant<input_zp = 42>} : (tensor<?x?xi32>, tensor<2x2xi32>) -> tensor<?x?xi32>
@@ -330,7 +330,7 @@ func.func @reshape_canonicalize(%arg0: tensor<?x10xf32>) -> tensor<?x10xf32> {
 
 // CHECK-LABEL: @reshape_canonicalize_double
 func.func @reshape_canonicalize_double(%arg0: tensor<?x10xf32>) -> tensor<?x5xf32> {
-  // CHECK: %[[VAR0:.+]] = "tosa.reshape"(%arg0) {new_shape = array<i64: -1, 5>}
+  // CHECK: %[[VAR0:.+]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: -1, 5>}
   // CHECK: return %[[VAR0]]
   %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 5, -1>}: (tensor<?x10xf32>) -> tensor<5x?xf32>
   %1 = "tosa.reshape"(%0) {new_shape = array<i64: -1, 5>}: (tensor<5x?xf32>) -> tensor<?x5xf32>
@@ -339,7 +339,7 @@ func.func @reshape_canonicalize_double(%arg0: tensor<?x10xf32>) -> tensor<?x5xf3
 
 // CHECK-LABEL: @reshape_canonicalize_const
 func.func @reshape_canonicalize_const() -> tensor<1x10xi32> {
-  // CHECK: %[[VAR0:.+]] = "tosa.const"() {value = dense<0> : tensor<1x10xi32>}
+  // CHECK: %[[VAR0:.+]] = "tosa.const"() <{value = dense<0> : tensor<1x10xi32>}
   // CHECK: return %[[VAR0]]
   %0 = "tosa.const"() {value = dense<0> : tensor<10xi32>} : () -> tensor<10xi32>
   %1 = "tosa.reshape"(%0) {new_shape = array<i64: 1, 10>} : (tensor<10xi32>) -> tensor<1x10xi32>
@@ -348,8 +348,8 @@ func.func @reshape_canonicalize_const() -> tensor<1x10xi32> {
 
 // CHECK-LABEL: @reshape_canonicalize_const_spat
 func.func @reshape_canonicalize_const_spat() -> (tensor<10xi32>, tensor<1x10xi32>) {
-  // CHECK-DAG: %[[VAR0:.+]] = "tosa.const"() {value = dense<0> : tensor<10xi32>}
-  // CHECK-DAG: %[[VAR1:.+]] = "tosa.const"() {value = dense<0> : tensor<1x10xi32>}
+  // CHECK-DAG: %[[VAR0:.+]] = "tosa.const"() <{value = dense<0> : tensor<10xi32>}
+  // CHECK-DAG: %[[VAR1:.+]] = "tosa.const"() <{value = dense<0> : tensor<1x10xi32>}
   // CHECK: return %[[VAR0]], %[[VAR1]]
   %0 = "tosa.const"() {value = dense<0> : tensor<10xi32>} : () -> tensor<10xi32>
   %1 = "tosa.reshape"(%0) {new_shape = array<i64: 1, 10>} : (tensor<10xi32>) -> tensor<1x10xi32>
@@ -366,7 +366,7 @@ func.func @reshape_canonicalize_const_sparse() -> (tensor<3xi32>, tensor<1x3xi32
 
 // CHECK-LABEL: @reshape_canonicalize_quant
 func.func @reshape_canonicalize_quant() -> (tensor<1x3x!quant.uniform<i8:f32, 1.000000e+00>>) {
-  // CHECK{literal}: "tosa.const"() {value = dense<[[1, 2, 3]]> : tensor<1x3xi8>} : () -> tensor<1x3x!quant.uniform<i8:f32, 1.000000e+00>> 
+  // CHECK{LITERAL}: "tosa.const"() <{value = dense<[[1, 2, 3]]> : tensor<1x3xi8>}> : () -> tensor<1x3x!quant.uniform<i8:f32, 1.000000e+00>>
   %0 = "tosa.const"() {value = dense<[1, 2, 3]> : tensor<3xi8>} : ()-> tensor<3x!quant.uniform<i8:f32, 1.000000e+00>>
   %1 = "tosa.reshape"(%0) {new_shape = array<i64: 1, 3>} : (tensor<3x!quant.uniform<i8:f32, 1.000000e+00>>) -> tensor<1x3x!quant.uniform<i8:f32, 1.000000e+00>>
   return %1 :  tensor<1x3x!quant.uniform<i8:f32, 1.000000e+00>>
@@ -374,7 +374,7 @@ func.func @reshape_canonicalize_quant() -> (tensor<1x3x!quant.uniform<i8:f32, 1.
 
 // CHECK-LABEL: @transpose_canonicalize_strip_quant
 func.func @transpose_canonicalize_strip_quant() -> (tensor<2x1x3xi8>) {
-  // CHECK: "tosa.const"() {value = dense<0> : tensor<2x1x3xi8>} : () -> tensor<2x1x3xi8>
+  // CHECK: "tosa.const"() <{value = dense<0> : tensor<2x1x3xi8>}> : () -> tensor<2x1x3xi8>
   %perms = "tosa.const"() {value = dense<[1, 0, 2]> : tensor<3xi32>} : () -> tensor<3xi32>
   %0 = "tosa.const"() {value = dense<0> : tensor<1x2x3xi8>} : ()-> tensor<1x2x3x!quant.uniform<i8:f32, 1.000000e+00>>
   %1 = "tosa.transpose"(%0, %perms) : (tensor<1x2x3x!quant.uniform<i8:f32, 1.000000e+00>>, tensor<3xi32>) -> tensor<2x1x3xi8>
@@ -420,8 +420,8 @@ func.func @transpose_no_op(%arg0: tensor<3x4x5x6xf32>) -> tensor<3x4x5x6xf32> {
 
 // CHECK-LABEL: @transpose_is_reshape
 func.func @transpose_is_reshape(%arg0: tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32> {
-  // CHECK: "tosa.reshape"(%arg0) {new_shape = array<i64: 1, 4, 1, 5>} : (tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32>
-  %perms = "tosa.const"() {value = dense<[3, 1, 0, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
+  // CHECK: "tosa.reshape"(%arg0) <{new_shape = array<i64: 1, 4, 1, 5>}> : (tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf32>
+  %perms = "tosa.const"() <{value = dense<[3, 1, 0, 2]> : tensor<4xi32>}> : () -> tensor<4xi32>
   %0 = "tosa.transpose"(%arg0, %perms) : (tensor<1x4x5x1xf32>, tensor<4xi32>) -> tensor<1x4x1x5xf32>
   return %0 : tensor<1x4x1x5xf32>
 }
@@ -429,9 +429,9 @@ func.func @transpose_is_reshape(%arg0: tensor<1x4x5x1xf32>) -> tensor<1x4x1x5xf3
 // CHECK-LABEL: @single_bit_reshape
 // https://github.com/llvm/llvm-project/issues/55440
 func.func @single_bit_reshape() -> tensor<1xi1> {
-  // CHECK: "tosa.const"() {value = dense<true> : tensor<1xi1>}
+  // CHECK: "tosa.const"() <{value = dense<true> : tensor<1xi1>}
   %0 = arith.constant dense<true> : tensor<1x1xi1>
-  %1 = "tosa.reshape"(%0) {new_shape = array<i64: 1>} : (tensor<1x1xi1>) -> tensor<1xi1>
+  %1 = "tosa.reshape"(%0) <{new_shape = array<i64: 1>}> : (tensor<1x1xi1>) -> tensor<1xi1>
   return %1 : tensor<1xi1>
 }
 
@@ -440,7 +440,7 @@ func.func @single_bit_reshape() -> tensor<1xi1> {
 // CHECK-LABEL: @fold_resize_nearest
 func.func @fold_resize_nearest(%arg0 : tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8> {
   // CHECK: return %arg0
-  %resize = "tosa.resize"(%arg0) {mode = "NEAREST_NEIGHBOR", scale = array<i64: 2, 2, 1, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>} : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8>
+  %resize = "tosa.resize"(%arg0) <{mode = "NEAREST_NEIGHBOR", scale = array<i64: 2, 2, 1, 1>, offset = array<i64: 0, 0>, border = array<i64: 0, 0>}> : (tensor<1x15x13x1xi8>) -> tensor<1x15x13x1xi8>
   return %resize : tensor<1x15x13x1xi8>
 }
 
@@ -481,9 +481,9 @@ func.func @canonicalize_concat_slice_middle_axis(%arg0 : tensor<1x12x12xf32>, %a
 
 // CHECK-LABEL: @canonicalize_cross_concat_inputs
 // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x12x12xf32>, %[[VAL_1:.*]]: tensor<1x12x12xf32>
-// CHECK: %[[VAL_2:.*]] = "tosa.concat"(%[[VAL_0]], %[[VAL_1]]) {axis = 2 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32>
-// CHECK: %[[VAL_3:.*]] = "tosa.slice"(%[[VAL_2]]) {size = array<i64: 1, 12, 15>, start = array<i64: 0, 0, 0>} : (tensor<1x12x24xf32>) -> tensor<1x12x15xf32>
-// CHECK: %[[VAL_4:.*]] = "tosa.slice"(%[[VAL_2]]) {size = array<i64: 1, 12, 20>, start = array<i64: 0, 0, 4>} : (tensor<1x12x24xf32>) -> tensor<1x12x20xf32>
+// CHECK: %[[VAL_2:.*]] = "tosa.concat"(%[[VAL_0]], %[[VAL_1]]) <{axis = 2 : i64}> : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32>
+// CHECK: %[[VAL_3:.*]] = "tosa.slice"(%[[VAL_2]]) <{size = array<i64: 1, 12, 15>, start = array<i64: 0, 0, 0>}> : (tensor<1x12x24xf32>) -> tensor<1x12x15xf32>
+// CHECK: %[[VAL_4:.*]] = "tosa.slice"(%[[VAL_2]]) <{size = array<i64: 1, 12, 20>, start = array<i64: 0, 0, 4>}> : (tensor<1x12x24xf32>) -> tensor<1x12x20xf32>
 // CHECK: return %[[VAL_3]], %[[VAL_4]] : tensor<1x12x15xf32>, tensor<1x12x20xf32>
 func.func @canonicalize_cross_concat_inputs(%arg0 : tensor<1x12x12xf32>, %arg1 : tensor<1x12x12xf32>) -> (tensor<1x12x15xf32>, tensor<1x12x20xf32>) {
   %0 = "tosa.concat"(%arg0, %arg1) {axis = 2 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32>
@@ -496,8 +496,8 @@ func.func @canonicalize_cross_concat_inputs(%arg0 : tensor<1x12x12xf32>, %arg1 :
 
 // CHECK-LABEL: @canonicalize_concat_slice_on_non_concat_axis
 // CHECK-SAME: %[[VAL_0:.*]]: tensor<1x12x12xf32>, %[[VAL_1:.*]]: tensor<1x12x12xf32>
-// CHECK: %[[VAL_2:.*]] = "tosa.slice"(%[[VAL_0]]) {size = array<i64: 1, 6, 12>, start = array<i64: 0, 0, 0>} : (tensor<1x12x12xf32>) -> tensor<1x6x12xf32>
-// CHECK: %[[VAL_3:.*]] = "tosa.slice"(%[[VAL_1]]) {size = array<i64: 1, 3, 12>, start = array<i64: 1, 3, 12>} : (tensor<1x12x12xf32>) -> tensor<1x3x12xf32>
+// CHECK: %[[VAL_2:.*]] = "tosa.slice"(%[[VAL_0]]) <{size = array<i64: 1, 6, 12>, start = array<i64: 0, 0, 0>}> : (tensor<1x12x12xf32>) -> tensor<1x6x12xf32>
+// CHECK: %[[VAL_3:.*]] = "tosa.slice"(%[[VAL_1]]) <{size = array<i64: 1, 3, 12>, start = array<i64: 1, 3, 12>}> : (tensor<1x12x12xf32>) -> tensor<1x3x12xf32>
 // CHECK: return %[[VAL_2]], %[[VAL_3]] : tensor<1x6x12xf32>, tensor<1x3x12xf32>
 func.func @canonicalize_concat_slice_on_non_concat_axis(%arg0 : tensor<1x12x12xf32>, %arg1 : tensor<1x12x12xf32>) -> (tensor<1x6x12xf32>, tensor<1x3x12xf32>) {
   %0 = "tosa.concat"(%arg0, %arg1) {axis = 2 : i64} : (tensor<1x12x12xf32>, tensor<1x12x12xf32>) -> tensor<1x12x24xf32>

diff  --git a/mlir/test/Dialect/Tosa/constant-op-fold.mlir b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
index 6c8f5935b4cb0..ec4d8bd74a5e8 100644
--- a/mlir/test/Dialect/Tosa/constant-op-fold.mlir
+++ b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
@@ -156,7 +156,7 @@ func.func @fold_add_splat_i32() -> tensor<10xi32> {
   %one = "tosa.const"() {value = dense<1> : tensor<10xi32>} : () -> tensor<10xi32>
   %two = "tosa.const"() {value = dense<2> : tensor<10xi32>} : () -> tensor<10xi32>
   %add = "tosa.add"(%one, %two) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32>
-  // CHECK: %[[THREE:.+]] = "tosa.const"() {value = dense<3> : tensor<10xi32>}
+  // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<3> : tensor<10xi32>}
   // CHECK: return %[[THREE]]
   return %add : tensor<10xi32>
 }
@@ -168,7 +168,7 @@ func.func @fold_add_splat_f32() -> tensor<10xf32> {
   %one = "tosa.const"() {value = dense<1.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %two = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %add = "tosa.add"(%one, %two) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>
-  // CHECK: %[[THREE:.+]] = "tosa.const"() {value = dense<3.000000e+00> : tensor<10xf32>}
+  // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<3.000000e+00>
   // CHECK: return %[[THREE]]
   return %add : tensor<10xf32>
 }
@@ -178,7 +178,7 @@ func.func @fold_add_splat_f32() -> tensor<10xf32> {
 // CHECK-LABEL: @fold_div_zero_lhs_i32
 func.func @fold_div_zero_lhs_i32(%arg0: tensor<i32>) -> tensor<i32> {
   %zero = "tosa.const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0> : tensor<i32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0>
   %div = "tosa.div"(%zero, %arg0) : (tensor<i32>, tensor<i32>) -> tensor<i32>
   // CHECK: return %[[ZERO]]
   return %div : tensor<i32>
@@ -200,7 +200,7 @@ func.func @fold_div_one_rhs_i32(%arg0: tensor<i32>) -> tensor<i32> {
 func.func @fold_div_splat_i32() -> tensor<i32> {
   %lhs = "tosa.const"() {value = dense<10> : tensor<i32>} : () -> tensor<i32>
   %rhs = "tosa.const"() {value = dense<-3> : tensor<i32>} : () -> tensor<i32>
-  // CHECK: %[[SPLAT:.+]] = "tosa.const"() {value = dense<-3> : tensor<i32>}
+  // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<-3>
   %div = "tosa.div"(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> tensor<i32>
   // CHECK: return %[[SPLAT]]
   return %div : tensor<i32>
@@ -212,7 +212,7 @@ func.func @fold_div_splat_i32() -> tensor<i32> {
 // CHECK-LABEL: @fold_mul_zero_rhs_f32
 func.func @fold_mul_zero_rhs_f32(%arg0: tensor<f32>) -> tensor<f32> {
   %zero = "tosa.const"() {value = dense<0.0> : tensor<f32>} : () -> tensor<f32>
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0.000000e+00> : tensor<f32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00>
   %mul = "tosa.mul"(%arg0, %zero) {shift = 0 : i32} : (tensor<f32>, tensor<f32>) -> tensor<f32>
   // CHECK: return %[[ZERO]]
   return %mul : tensor<f32>
@@ -223,7 +223,7 @@ func.func @fold_mul_zero_rhs_f32(%arg0: tensor<f32>) -> tensor<f32> {
 // CHECK-LABEL: @fold_mul_zero_lhs_f32
 func.func @fold_mul_zero_lhs_f32(%arg0: tensor<f32>) -> tensor<f32> {
   %zero = "tosa.const"() {value = dense<0.0> : tensor<f32>} : () -> tensor<f32>
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0.000000e+00> : tensor<f32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0.000000e+00>
   %mul = "tosa.mul"(%zero, %arg0) {shift = 0 : i32} : (tensor<f32>, tensor<f32>) -> tensor<f32>
   // CHECK: return %[[ZERO]]
   return %mul : tensor<f32>
@@ -234,7 +234,7 @@ func.func @fold_mul_zero_lhs_f32(%arg0: tensor<f32>) -> tensor<f32> {
 // CHECK-LABEL: @fold_mul_zero_rhs_i32
 func.func @fold_mul_zero_rhs_i32(%arg0: tensor<i32>) -> tensor<i32> {
   %zero = "tosa.const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0> : tensor<i32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0>
   %mul = "tosa.mul"(%arg0, %zero) {shift = 0 : i32} : (tensor<i32>, tensor<i32>) -> tensor<i32>
   // CHECK: return %[[ZERO]]
   return %mul : tensor<i32>
@@ -245,7 +245,7 @@ func.func @fold_mul_zero_rhs_i32(%arg0: tensor<i32>) -> tensor<i32> {
 // CHECK-LABEL: @fold_mul_zero_lhs_i32
 func.func @fold_mul_zero_lhs_i32(%arg0: tensor<i32>) -> tensor<i32> {
   %zero = "tosa.const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0> : tensor<i32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0>
   %mul = "tosa.mul"(%zero, %arg0) {shift = 0 : i32} : (tensor<i32>, tensor<i32>) -> tensor<i32>
   // CHECK: return %[[ZERO]]
   return %mul : tensor<i32>
@@ -298,7 +298,7 @@ func.func @fold_mul_splat_i8() -> tensor<10xi8> {
   %one = "tosa.const"() {value = dense<17> : tensor<10xi8>} : () -> tensor<10xi8>
   %two = "tosa.const"() {value = dense<32> : tensor<10xi8>} : () -> tensor<10xi8>
   %mul = "tosa.mul"(%one, %two) {shift = 3 : i32} : (tensor<10xi8>, tensor<10xi8>) -> tensor<10xi8>
-  // CHECK: %[[THREE:.+]] = "tosa.const"() {value = dense<68> : tensor<10xi8>}
+  // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<68> : tensor<10xi8>}
   // CHECK: return %[[THREE]]
   return %mul : tensor<10xi8>
 }
@@ -310,7 +310,7 @@ func.func @fold_mul_splat_f32() -> tensor<10xf32> {
   %one = "tosa.const"() {value = dense<3.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %two = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %mul = "tosa.mul"(%one, %two) {shift = 0 : i32} : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>
-  // CHECK: %[[THREE:.+]] = "tosa.const"() {value = dense<6.000000e+00> : tensor<10xf32>}
+  // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<6.000000e+00> : tensor<10xf32>}
   // CHECK: return %[[THREE]]
   return %mul : tensor<10xf32>
 }
@@ -342,7 +342,7 @@ func.func @fold_sub_splat_i32() -> tensor<10xi32> {
   %one = "tosa.const"() {value = dense<1> : tensor<10xi32>} : () -> tensor<10xi32>
   %two = "tosa.const"() {value = dense<2> : tensor<10xi32>} : () -> tensor<10xi32>
   %sub = "tosa.sub"(%one, %two) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi32>
-  // CHECK: %[[THREE:.+]] = "tosa.const"() {value = dense<-1> : tensor<10xi32>}
+  // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<-1> : tensor<10xi32>}
   // CHECK: return %[[THREE]]
   return %sub : tensor<10xi32>
 }
@@ -354,7 +354,7 @@ func.func @fold_sub_splat_f32() -> tensor<10xf32> {
   %one = "tosa.const"() {value = dense<1.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %two = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %sub = "tosa.sub"(%one, %two) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>
-  // CHECK: %[[THREE:.+]] = "tosa.const"() {value = dense<-1.000000e+00> : tensor<10xf32>}
+  // CHECK: %[[THREE:.+]] = "tosa.const"() <{value = dense<-1.000000e+00> : tensor<10xf32>}
   // CHECK: return %[[THREE]]
   return %sub : tensor<10xf32>
 }
@@ -369,8 +369,8 @@ func.func @fold_greater_splat_f32() -> (tensor<10xi1>, tensor<10xi1>) {
   %3 = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %true = "tosa.greater"(%0, %1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1>
   %false = "tosa.greater"(%2, %3) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1>
-  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() {value = dense<true> : tensor<10xi1>}
-  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() {value = dense<false> : tensor<10xi1>}
+  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense<true> : tensor<10xi1>}
+  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense<false> : tensor<10xi1>}
   // CHECK: return %[[TRUE]], %[[FALSE]]
   return %true, %false : tensor<10xi1>, tensor<10xi1>
 }
@@ -385,8 +385,8 @@ func.func @fold_greater_splat_i32() -> (tensor<10xi1>, tensor<10xi1>) {
   %3 = "tosa.const"() {value = dense<-12> : tensor<10xi32>} : () -> tensor<10xi32>
   %false = "tosa.greater"(%0, %1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1>
   %true = "tosa.greater"(%2, %3) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1>
-  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() {value = dense<false> : tensor<10xi1>}
-  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() {value = dense<true> : tensor<10xi1>}
+  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense<false> : tensor<10xi1>}
+  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense<true> : tensor<10xi1>}
   // CHECK: return %[[FALSE]], %[[TRUE]]
   return %false, %true : tensor<10xi1>, tensor<10xi1>
 }
@@ -401,8 +401,8 @@ func.func @fold_greater_eq_splat_f32() -> (tensor<10xi1>, tensor<10xi1>) {
   %3 = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %true = "tosa.greater_equal"(%0, %1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1>
   %false = "tosa.greater_equal"(%2, %3) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1>
-  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() {value = dense<true> : tensor<10xi1>}
-  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() {value = dense<false> : tensor<10xi1>}
+  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense<true> : tensor<10xi1>}
+  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense<false> : tensor<10xi1>}
   // CHECK: return %[[TRUE]], %[[FALSE]]
   return %true, %false : tensor<10xi1>, tensor<10xi1>
 }
@@ -417,8 +417,8 @@ func.func @fold_greater_eq_splat_i32() -> (tensor<10xi1>, tensor<10xi1>) {
   %3 = "tosa.const"() {value = dense<-10> : tensor<10xi32>} : () -> tensor<10xi32>
   %true = "tosa.greater_equal"(%2, %3) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1>
   %false = "tosa.greater_equal"(%0, %1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1>
-  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() {value = dense<true> : tensor<10xi1>}
-  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() {value = dense<false> : tensor<10xi1>}
+  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense<true> : tensor<10xi1>}
+  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense<false> : tensor<10xi1>}
   // CHECK: return %[[TRUE]], %[[FALSE]]
   return %true, %false : tensor<10xi1>, tensor<10xi1>
 }
@@ -433,8 +433,8 @@ func.func @fold_eq_splat_f32() -> (tensor<10xi1>, tensor<10xi1>) {
   %3 = "tosa.const"() {value = dense<2.0> : tensor<10xf32>} : () -> tensor<10xf32>
   %true = "tosa.equal"(%0, %1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1>
   %false = "tosa.equal"(%2, %3) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xi1>
-  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() {value = dense<true> : tensor<10xi1>}
-  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() {value = dense<false> : tensor<10xi1>}
+  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense<true> : tensor<10xi1>}
+  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense<false> : tensor<10xi1>}
   // CHECK: return %[[TRUE]], %[[FALSE]]
   return %true, %false : tensor<10xi1>, tensor<10xi1>
 }
@@ -449,8 +449,8 @@ func.func @fold_eq_splat_i32() -> (tensor<10xi1>, tensor<10xi1>) {
   %3 = "tosa.const"() {value = dense<-10> : tensor<10xi32>} : () -> tensor<10xi32>
   %true = "tosa.equal"(%2, %3) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1>
   %false = "tosa.equal"(%0, %1) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1>
-  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() {value = dense<true> : tensor<10xi1>}
-  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() {value = dense<false> : tensor<10xi1>}
+  // CHECK-DAG: %[[TRUE:.+]] = "tosa.const"() <{value = dense<true> : tensor<10xi1>}
+  // CHECK-DAG: %[[FALSE:.+]] = "tosa.const"() <{value = dense<false> : tensor<10xi1>}
   // CHECK: return %[[TRUE]], %[[FALSE]]
   return %true, %false : tensor<10xi1>, tensor<10xi1>
 }
@@ -459,7 +459,7 @@ func.func @fold_eq_splat_i32() -> (tensor<10xi1>, tensor<10xi1>) {
 
 // CHECK-LABEL: @fold_eq_i32
 func.func @fold_eq_i32(%arg0 : tensor<10xi32>) -> (tensor<10xi1>) {
-  // CHECK: %[[TRUE:.+]] = "tosa.const"() {value = dense<true> : tensor<10xi1>}
+  // CHECK: %[[TRUE:.+]] = "tosa.const"() <{value = dense<true> : tensor<10xi1>}
   %0 = "tosa.equal"(%arg0, %arg0) : (tensor<10xi32>, tensor<10xi32>) -> tensor<10xi1>
   // CHECK: return %[[TRUE]]
   return %0 : tensor<10xi1>
@@ -468,7 +468,7 @@ func.func @fold_eq_i32(%arg0 : tensor<10xi32>) -> (tensor<10xi1>) {
 // -----
 
 func.func @reshape_splat() -> tensor<6x5x4xi32> {
-  // CHECK: %[[SPLAT:.+]] = "tosa.const"() {value = dense<42> : tensor<6x5x4xi32>}
+  // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<42> : tensor<6x5x4xi32>}
   %splat = "tosa.const"() {value = dense<42> : tensor<4x5x6xi32>} : () -> tensor<4x5x6xi32>
   %reshape = "tosa.reshape"(%splat) { new_shape = array<i64: 6, 5, 4> } : (tensor<4x5x6xi32>) -> tensor<6x5x4xi32>
   // CHECK: return %[[SPLAT]]
@@ -479,7 +479,7 @@ func.func @reshape_splat() -> tensor<6x5x4xi32> {
 
 // CHECK-LABEL: @slice_splat
 func.func @slice_splat() -> tensor<1x1x1xi32> {
-  // CHECK: %[[SLICE:.+]] = "tosa.const"() {value = dense<42> : tensor<1x1x1xi32>}
+  // CHECK: %[[SLICE:.+]] = "tosa.const"() <{value = dense<42> : tensor<1x1x1xi32>}
   %splat = "tosa.const"() {value = dense<42> : tensor<4x5x6xi32>} : () -> tensor<4x5x6xi32>
   %slice = "tosa.slice"(%splat) { size = array<i64: 1, 1, 1>, start = array<i64: 1, 2, 3> } : (tensor<4x5x6xi32>) -> tensor<1x1x1xi32>
   // CHECK: return %[[SLICE]]
@@ -491,7 +491,7 @@ func.func @slice_splat() -> tensor<1x1x1xi32> {
 // CHECK-LABEL: @slice_singleton
 func.func @slice_singleton() -> tensor<1x1xi32> {
   %splat = "tosa.const"() {value = dense<[[0, 1, 2], [3, 4, 5], [6, 7 ,8]]> : tensor<3x3xi32>} : () -> tensor<3x3xi32>
-  // CHECK: %[[SLICE:.+]] = "tosa.const"() {value = dense<4> : tensor<1x1xi32>}
+  // CHECK: %[[SLICE:.+]] = "tosa.const"() <{value = dense<4> : tensor<1x1xi32>}
   %slice = "tosa.slice"(%splat) { size = array<i64: 1, 1>, start = array<i64: 1, 1> } : (tensor<3x3xi32>) -> tensor<1x1xi32>
   // CHECK: return %[[SLICE]]
   return %slice : tensor<1x1xi32>
@@ -502,7 +502,7 @@ func.func @slice_singleton() -> tensor<1x1xi32> {
 // CHECK: func.func @cast_float_to_float
 func.func @cast_float_to_float() -> tensor<f16> {
   %splat = "tosa.const"() {value = dense<42.0> : tensor<f32>} : () -> tensor<f32>
-  // CHECK: %[[SPLAT:.+]] = "tosa.const"() {value = dense<4.200000e+01> : tensor<f16>} : () -> tensor<f16>
+  // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<4.200000e+01> : tensor<f16>}
   %cast = "tosa.cast"(%splat) : (tensor<f32>) -> tensor<f16>
   // CHECK: return %[[SPLAT]]
   return %cast : tensor<f16>
@@ -513,7 +513,7 @@ func.func @cast_float_to_float() -> tensor<f16> {
 // CHECK: func.func @cast_int_to_float
 func.func @cast_int_to_float() -> tensor<f16> {
   %splat = "tosa.const"() {value = dense<4> : tensor<i32>} : () -> tensor<i32>
-  // CHECK: %[[SPLAT:.+]] = "tosa.const"() {value = dense<4.000000e+00> : tensor<f16>} : () -> tensor<f16>
+  // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<4.000000e+00> : tensor<f16>}
   %cast = "tosa.cast"(%splat) : (tensor<i32>) -> tensor<f16>
   // CHECK: return %[[SPLAT]]
   return %cast : tensor<f16>
@@ -524,7 +524,7 @@ func.func @cast_int_to_float() -> tensor<f16> {
 // CHECK: func.func @cast_float_to_int
 func.func @cast_float_to_int() -> tensor<i16> {
   %splat = "tosa.const"() {value = dense<-4.0> : tensor<f32>} : () -> tensor<f32>
-  // CHECK: %[[SPLAT:.+]] = "tosa.const"() {value = dense<-4> : tensor<i16>} : () -> tensor<i16>
+  // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<-4> : tensor<i16>}
   %cast = "tosa.cast"(%splat) : (tensor<f32>) -> tensor<i16>
   // CHECK: return %[[SPLAT]]
   return %cast : tensor<i16>
@@ -535,7 +535,7 @@ func.func @cast_float_to_int() -> tensor<i16> {
 // CHECK: func.func @cast_int_to_int_trunc
 func.func @cast_int_to_int_trunc() -> tensor<i16> {
   %splat = "tosa.const"() {value = dense<-1> : tensor<i32>} : () -> tensor<i32>
-  // CHECK: %[[SPLAT:.+]] = "tosa.const"() {value = dense<-1> : tensor<i16>} : () -> tensor<i16>
+  // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<-1> : tensor<i16>}
   %cast = "tosa.cast"(%splat) : (tensor<i32>) -> tensor<i16>
   // CHECK: return %[[SPLAT]]
   return %cast : tensor<i16>
@@ -546,7 +546,7 @@ func.func @cast_int_to_int_trunc() -> tensor<i16> {
 // CHECK: func.func @cast_int_to_int_sign
 func.func @cast_int_to_int_sign() -> tensor<i32> {
   %splat = "tosa.const"() {value = dense<-1> : tensor<i16>} : () -> tensor<i16>
-  // CHECK: %[[SPLAT:.+]] = "tosa.const"() {value = dense<-1> : tensor<i32>} : () -> tensor<i32>
+  // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<-1> : tensor<i32>}
   %cast = "tosa.cast"(%splat) : (tensor<i16>) -> tensor<i32>
   // CHECK: return %[[SPLAT]]
   return %cast : tensor<i32>
@@ -556,7 +556,7 @@ func.func @cast_int_to_int_sign() -> tensor<i32> {
 
 // CHECK-LABEL: @reverse_splat
 func.func @reverse_splat() -> tensor<10xi32> {
-  // CHECK: %[[SPLAT:.+]] = "tosa.const"() {value = dense<42> : tensor<10xi32>}
+  // CHECK: %[[SPLAT:.+]] = "tosa.const"() <{value = dense<42> : tensor<10xi32>}
   %splat = "tosa.const"() {value = dense<42> : tensor<10xi32>} : () -> tensor<10xi32>
   %reverse = "tosa.reverse"(%splat) { axis = 0 : i64 } : (tensor<10xi32>) -> tensor<10xi32>
   // CHECK: return %[[SPLAT]]
@@ -569,7 +569,7 @@ func.func @reverse_splat() -> tensor<10xi32> {
 func.func @reverse_length_one(%arg0 : tensor<10x1xi32>) -> (tensor<10x1xi32>, tensor<10x1xi32>) {
   %nofold = "tosa.reverse"(%arg0) { axis = 0 : i64 } : (tensor<10x1xi32>) -> tensor<10x1xi32>
   %fold = "tosa.reverse"(%arg0) { axis = 1 : i64 } : (tensor<10x1xi32>) -> tensor<10x1xi32>
-  // CHECK: %[[NOFOLD:.+]] = "tosa.reverse"(%arg0) {axis = 0 : i64}
+  // CHECK: %[[NOFOLD:.+]] = "tosa.reverse"(%arg0) <{axis = 0 : i64}
   // CHECK: return %[[NOFOLD]], %arg0
   return %nofold, %fold : tensor<10x1xi32>, tensor<10x1xi32>
 }

diff  --git a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
index 77a7b7e884048..b8f0177646fc8 100644
--- a/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-conv2d.mlir
@@ -5,13 +5,13 @@
 // CHECK-LABEL: @conv2d_as_fully_connected
 func.func @conv2d_as_fully_connected(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<3x1x1x2xf32>, %arg2: tensor<3xf32>) -> tensor<4x10x10x3xf32> {
   // CHECK-NOT: "tosa.conv2d"
-  // CHECK: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 400, 2>}
+  // CHECK: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 400, 2>}
   // CHECK-SAME: -> tensor<400x2xf32>
-  // CHECK: %[[VAR1:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 3, 2>}
+  // CHECK: %[[VAR1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 3, 2>}
   // CHECK-SAME: -> tensor<3x2xf32>
   // CHECK: %[[VAR2:.*]] = "tosa.fully_connected"(%[[VAR0]], %[[VAR1]], %arg2)
   // CHECK-SAME: -> tensor<400x3xf32>
-  // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) {new_shape = array<i64: 4, 10, 10, 3>}
+  // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) <{new_shape = array<i64: 4, 10, 10, 3>}
   // CHECK-SAME: -> tensor<4x10x10x3xf32>
   // CHECK: return %[[VAR3]]
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>
@@ -23,14 +23,14 @@ func.func @conv2d_as_fully_connected(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor
 // CHECK-LABEL: @conv2d_as_fully_connected_quant
 func.func @conv2d_as_fully_connected_quant(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x10x10x3xi32> {
   // CHECK-NOT: "tosa.conv2d"
-  // CHECK: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 400, 2>}
+  // CHECK: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 400, 2>}
   // CHECK-SAME: -> tensor<400x2xi8>
-  // CHECK: %[[VAR1:.*]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 3, 2>}
+  // CHECK: %[[VAR1:.*]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 3, 2>}
   // CHECK-SAME: -> tensor<3x2xi8>
   // CHECK: %[[VAR2:.*]] = "tosa.fully_connected"(%[[VAR0]], %[[VAR1]], %arg2)
   // CHECK-SAME: quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>
   // CHECK-SAME: -> tensor<400x3xi32>
-  // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) {new_shape = array<i64: 4, 10, 10, 3>}
+  // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) <{new_shape = array<i64: 4, 10, 10, 3>}
   // CHECK-SAME: -> tensor<4x10x10x3xi32>
   // CHECK: return %[[VAR3]]
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>, quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x10x10x3xi32>
@@ -44,10 +44,10 @@ func.func @conv2d_as_fully_connected_quant(%arg0: tensor<4x10x10x2xi8>, %arg1: t
 // CHECK-SAME:                                     %[[VAL_1:.*]]: tensor<384x1x1x64xi8>,
 // CHECK-SAME:                                     %[[VAL_2:.*]]: tensor<384xi32>) -> tensor<?x14x14x384xi32> {
 func.func @conv_with_dynamic_dim(%arg0: tensor<?x14x14x64xi8>, %arg1: tensor<384x1x1x64xi8>, %arg2: tensor<384xi32>) -> tensor<?x14x14x384xi32> {
-// CHECK:           %[[VAL_3:.*]] = "tosa.reshape"(%[[VAL_0]]) {new_shape = array<i64: -1, 64>} : (tensor<?x14x14x64xi8>) -> tensor<?x64xi8>
-// CHECK:           %[[VAL_4:.*]] = "tosa.reshape"(%[[VAL_1]]) {new_shape = array<i64: 384, 64>} : (tensor<384x1x1x64xi8>) -> tensor<384x64xi8>
-// CHECK:           %[[VAL_5:.*]] = "tosa.fully_connected"(%[[VAL_3]], %[[VAL_4]], %[[VAL_2]]) {quantization_info = #tosa.conv_quant<input_zp = -6, weight_zp = 11>} : (tensor<?x64xi8>, tensor<384x64xi8>, tensor<384xi32>) -> tensor<?x384xi32>
-// CHECK:           %[[VAL_6:.*]] = "tosa.reshape"(%[[VAL_5]]) {new_shape = array<i64: -1, 14, 14, 384>} : (tensor<?x384xi32>) -> tensor<?x14x14x384xi32>
+// CHECK:           %[[VAL_3:.*]] = "tosa.reshape"(%[[VAL_0]]) <{new_shape = array<i64: -1, 64>}> : (tensor<?x14x14x64xi8>) -> tensor<?x64xi8>
+// CHECK:           %[[VAL_4:.*]] = "tosa.reshape"(%[[VAL_1]]) <{new_shape = array<i64: 384, 64>}> : (tensor<384x1x1x64xi8>) -> tensor<384x64xi8>
+// CHECK:           %[[VAL_5:.*]] = "tosa.fully_connected"(%[[VAL_3]], %[[VAL_4]], %[[VAL_2]]) <{quantization_info = #tosa.conv_quant<input_zp = -6, weight_zp = 11>}> : (tensor<?x64xi8>, tensor<384x64xi8>, tensor<384xi32>) -> tensor<?x384xi32>
+// CHECK:           %[[VAL_6:.*]] = "tosa.reshape"(%[[VAL_5]]) <{new_shape = array<i64: -1, 14, 14, 384>}> : (tensor<?x384xi32>) -> tensor<?x14x14x384xi32>
 // CHECK:           return %[[VAL_6]] : tensor<?x14x14x384xi32>
 // CHECK:         }
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, quantization_info = #tosa.conv_quant<input_zp = -6, weight_zp = 11>, stride = array<i64: 1, 1>} : (tensor<?x14x14x64xi8>, tensor<384x1x1x64xi8>, tensor<384xi32>) -> tensor<?x14x14x384xi32>
@@ -58,13 +58,13 @@ func.func @conv_with_dynamic_dim(%arg0: tensor<?x14x14x64xi8>, %arg1: tensor<384
 
 // CHECK-LABEL: @conv2d_as_fully_connected_padded
 func.func @conv2d_as_fully_connected_padded(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<3x1x1x2xi8>, %arg2: tensor<3xi32>) -> tensor<4x12x12x3xi32> {
-  // CHECK-DAG: %[[PAD_SHAPE:.+]] = "tosa.const"() {value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi64>}
-  // CHECK-DAG: %[[PAD_VAL:.+]] = "tosa.const"() {value = dense<42> : tensor<i8>}
+  // CHECK-DAG: %[[PAD_SHAPE:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi64>}
+  // CHECK-DAG: %[[PAD_VAL:.+]] = "tosa.const"() <{value = dense<42> : tensor<i8>}
   // CHECK-DAG: %[[PAD:.+]] = "tosa.pad"(%arg0, %[[PAD_SHAPE]], %[[PAD_VAL]]) : (tensor<4x10x10x2xi8>, tensor<4x2xi64>, tensor<i8>) -> tensor<4x12x12x2xi8>
-  // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = "tosa.reshape"(%[[PAD]]) {new_shape = array<i64: 576, 2>}
-  // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = "tosa.reshape"(%arg1) {new_shape = array<i64: 3, 2>}
-  // CHECK-DAG: %[[FULLY:.+]] = "tosa.fully_connected"(%[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2) {quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>}
-  // CHECK: %[[RESHAPE:.+]] = "tosa.reshape"(%[[FULLY]]) {new_shape = array<i64: 4, 12, 12, 3>}
+  // CHECK-DAG: %[[RESHAPE_INPUT:.+]] = "tosa.reshape"(%[[PAD]]) <{new_shape = array<i64: 576, 2>}
+  // CHECK-DAG: %[[RESHAPE_FILTER:.+]] = "tosa.reshape"(%arg1) <{new_shape = array<i64: 3, 2>}
+  // CHECK-DAG: %[[FULLY:.+]] = "tosa.fully_connected"(%[[RESHAPE_INPUT]], %[[RESHAPE_FILTER]], %arg2) <{quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>}
+  // CHECK: %[[RESHAPE:.+]] = "tosa.reshape"(%[[FULLY]]) <{new_shape = array<i64: 4, 12, 12, 3>}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {pad = array<i64: 1, 1, 1, 1>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>, quantization_info = #tosa.conv_quant<input_zp = 42, weight_zp = 24>} : (tensor<4x10x10x2xi8>, tensor<3x1x1x2xi8>, tensor<3xi32>) -> tensor<4x12x12x3xi32>
   return %0 : tensor<4x12x12x3xi32>
 }

diff  --git a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
index f9458c78300c5..e835991273ec5 100644
--- a/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-depthwise.mlir
@@ -5,11 +5,11 @@
 // CHECK-LABEL: @depthwise_conv2d_as_mul
 func.func @depthwise_conv2d_as_mul(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x10x10x6xf32> {
   // CHECK-NOT: "tosa.depthwise_conv2d"
-  // CHECK: %[[VAR0:.*]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 4, 10, 10, 2, 1>}
+  // CHECK: %[[VAR0:.*]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 4, 10, 10, 2, 1>}
   // CHECK-SAME: -> tensor<4x10x10x2x1xf32>
   // CHECK: %[[VAR2:.*]] = "tosa.mul"(%[[VAR0]], %arg1)
   // CHECK-SAME: -> tensor<4x10x10x2x3xf32>
-  // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) {new_shape = array<i64: 4, 10, 10, 6>}
+  // CHECK: %[[VAR3:.*]] = "tosa.reshape"(%[[VAR2]]) <{new_shape = array<i64: 4, 10, 10, 6>}
   // CHECK-SAME: -> tensor<4x10x10x6xf32>
   // CHECK: %[[VAR4:.*]] = "tosa.add"(%[[VAR3]], %arg2)
   // CHECK-SAME: -> tensor<4x10x10x6xf32>
@@ -22,15 +22,15 @@ func.func @depthwise_conv2d_as_mul(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1
 
 // CHECK-LABEL: @depthwise_conv2d_as_mul_q
 func.func @depthwise_conv2d_as_mul_q(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<1x1x2x3xi8>, %arg2: tensor<6xi32>) -> tensor<4x10x10x6xi32> {
-  // CHECK: %[[iZp:.+]] = "tosa.const"() {value = dense<7> : tensor<i32>}
-  // CHECK: %[[wZp:.+]] = "tosa.const"() {value = dense<11> : tensor<i32>}
-  // CHECK: %[[rIn:.+]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 4, 10, 10, 2, 1>}
+  // CHECK: %[[iZp:.+]] = "tosa.const"() <{value = dense<7> : tensor<i32>}
+  // CHECK: %[[wZp:.+]] = "tosa.const"() <{value = dense<11> : tensor<i32>}
+  // CHECK: %[[rIn:.+]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 4, 10, 10, 2, 1>}
   // CHECK: %[[cIn:.+]] = "tosa.cast"(%[[rIn]]) : (tensor<4x10x10x2x1xi8>) -> tensor<4x10x10x2x1xi32>
   // CHECK: %[[cWe:.+]] = "tosa.cast"(%arg1) : (tensor<1x1x2x3xi8>) -> tensor<1x1x2x3xi32>
   // CHECK: %[[sIn:.+]] = "tosa.sub"(%[[cIn]], %[[iZp]])
   // CHECK: %[[sWe:.+]] = "tosa.sub"(%[[cWe]], %[[wZp]])
-  // CHECK: %[[mul:.+]] = "tosa.mul"(%[[sIn]], %[[sWe]]) {shift = 0 : i32}
-  // CHECK: %[[reO:.+]] = "tosa.reshape"(%[[mul]]) {new_shape = array<i64: 4, 10, 10, 6>}
+  // CHECK: %[[mul:.+]] = "tosa.mul"(%[[sIn]], %[[sWe]]) <{shift = 0 : i32}
+  // CHECK: %[[reO:.+]] = "tosa.reshape"(%[[mul]]) <{new_shape = array<i64: 4, 10, 10, 6>}
   // CHECK: %[[add:.+]] = "tosa.add"(%[[reO]], %arg2)
   %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>, quantization_info = #tosa.conv_quant<input_zp = 7, weight_zp = 11>} : (tensor<4x10x10x2xi8>, tensor<1x1x2x3xi8>, tensor<6xi32>) -> tensor<4x10x10x6xi32>
   return %0 : tensor<4x10x10x6xi32>
@@ -40,12 +40,12 @@ func.func @depthwise_conv2d_as_mul_q(%arg0: tensor<4x10x10x2xi8>, %arg1: tensor<
 
 // CHECK-LABEL: @depthwise_conv2d_as_mul_padded
 func.func @depthwise_conv2d_as_mul_padded(%arg0: tensor<4x10x10x2xf32>, %arg1: tensor<1x1x2x3xf32>, %arg2: tensor<6xf32>) -> tensor<4x12x12x6xf32> {
-  // CHECK: %[[pad:.+]] = "tosa.const"() {value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0], [0, 0]]> : tensor<5x2xi64>}
-  // CHECK: %[[zero:.+]] = "tosa.const"() {value = dense<0.000000e+00> : tensor<f32>}
-  // CHECK: %[[reIn:.+]] = "tosa.reshape"(%arg0) {new_shape = array<i64: 4, 10, 10, 2, 1>}
+  // CHECK: %[[pad:.+]] = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0], [0, 0]]> : tensor<5x2xi64>}
+  // CHECK: %[[zero:.+]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<f32>}
+  // CHECK: %[[reIn:.+]] = "tosa.reshape"(%arg0) <{new_shape = array<i64: 4, 10, 10, 2, 1>}
   // CHECK: %[[padded:.+]] = "tosa.pad"(%[[reIn]], %[[pad]], %[[zero]]) : (tensor<4x10x10x2x1xf32>, tensor<5x2xi64>, tensor<f32>) -> tensor<4x12x12x2x1xf32>
-  // CHECK: %[[mul:.+]] = "tosa.mul"(%3, %arg1) {shift = 0 : i32}
-  // CHECK: %[[reOut:.+]] = "tosa.reshape"(%[[mul]]) {new_shape = array<i64: 4, 12, 12, 6>}
+  // CHECK: %[[mul:.+]] = "tosa.mul"(%3, %arg1) <{shift = 0 : i32}
+  // CHECK: %[[reOut:.+]] = "tosa.reshape"(%[[mul]]) <{new_shape = array<i64: 4, 12, 12, 6>}
   // CHECK: %[[add:.+]] = "tosa.add"(%[[reOut]], %arg2)
   %0 = "tosa.depthwise_conv2d"(%arg0, %arg1, %arg2) {pad = array<i64: 1, 1, 1, 1>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x2xf32>, tensor<1x1x2x3xf32>, tensor<6xf32>) -> tensor<4x12x12x6xf32>
   return %0 : tensor<4x12x12x6xf32>

diff  --git a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
index 81b046e042002..6ccf510804d99 100644
--- a/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-decompose-transpose-conv.mlir
@@ -2,8 +2,8 @@
 
 // CHECK-LABEL: @transpose_conv2d
 func.func @transpose_conv2d(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3xf32>, %arg2: tensor<5xf32>) -> tensor<2x18x19x5xf32> {
-  // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
-  // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
+  // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) <{axis = 1 : i64}
+  // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) <{axis = 2 : i64}
   // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2)
   // CHECK-SAME: dilation = array<i64: 1, 1>, pad = array<i64: 2, 2, 5, 5>, stride = array<i64: 1, 1>
   %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: -1, -1, -1, -1>, stride = array<i64: 1, 1>} : (tensor<2x16x14x3xf32>, tensor<5x3x6x3xf32>, tensor<5xf32>) -> tensor<2x18x19x5xf32>
@@ -15,9 +15,9 @@ func.func @transpose_conv2d(%arg0: tensor<2x16x14x3xf32>, %arg1: tensor<5x3x6x3x
 // CHECK-LABEL: @transpose_conv2d_quantized
 
 func.func @transpose_conv2d_quantized(%arg0: tensor<2x16x14x3xi8>, %arg1: tensor<5x3x6x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x18x19x5xi32>) {
-  // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
-  // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
-  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) {dilation = array<i64: 1, 1>, pad = array<i64: 2, 2, 5, 5>, quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = array<i64: 1, 1>}
+  // CHECK: %[[REV1:.+]] = "tosa.reverse"(%arg1) <{axis = 1 : i64}
+  // CHECK: %[[REV2:.+]] = "tosa.reverse"(%[[REV1]]) <{axis = 2 : i64}
+  // CHECK: "tosa.conv2d"(%arg0, %[[REV2]], %arg2) <{dilation = array<i64: 1, 1>, pad = array<i64: 2, 2, 5, 5>, quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = array<i64: 1, 1>}
   %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array<i64: 0, 0, 0, 0>, quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = array<i64: -1, -1, -1, -1>, stride = array<i64: 1, 1>} : (tensor<2x16x14x3xi8>, tensor<5x3x6x3xi8>, tensor<5xi32>) -> tensor<2x18x19x5xi32>
   return %0 : tensor<2x18x19x5xi32>
 }
@@ -26,8 +26,8 @@ func.func @transpose_conv2d_quantized(%arg0: tensor<2x16x14x3xi8>, %arg1: tensor
 
 // CHECK-LABEL: @transpose_conv2d_quantized_padded
 func.func @transpose_conv2d_quantized_padded(%arg0: tensor<2x16x14x3xi8>, %arg1: tensor<5x3x6x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x21x26x5xi32>) {
-  // CHECK-DAG: %[[REV0:.+]] = "tosa.reverse"(%0) {axis = 2 : i64}
-  // CHECK-DAG: %[[REV1:.+]] = "tosa.reverse"(%arg1) {axis = 1 : i64}
+  // CHECK-DAG: %[[REV0:.+]] = "tosa.reverse"(%0) <{axis = 2 : i64}
+  // CHECK-DAG: %[[REV1:.+]] = "tosa.reverse"(%arg1) <{axis = 1 : i64}
   // CHECK: "tosa.conv2d"(%arg0, %1, %arg2) 
   // CHECK-SAME: dilation = array<i64: 1, 1>, pad = array<i64: 3, 4, 8, 9>,
   // CHECK-SAME: quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = array<i64: 1, 1>}
@@ -44,27 +44,27 @@ func.func @transpose_conv2d_quantized_padded(%arg0: tensor<2x16x14x3xi8>, %arg1:
 // CHECK-LABEL: @transpose_conv2d_strided
 func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor<5x3x5x3xf32>, %arg2: tensor<5xf32>) -> tensor<2x?x?x5xf32> {
   // Manipulate the weight matrix to handle striding.
-  // CHECK-DAG: %[[PADV:.+]]  = "tosa.const"() {value = dense<{{\[\[}}0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi32>}
-  // CHECK-DAG: %[[TRANSV:.+]]  = "tosa.const"() {value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>}
+  // CHECK-DAG: %[[PADV:.+]]  = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi32>}
+  // CHECK-DAG: %[[TRANSV:.+]]  = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>}
   // CHECK-DAG: %[[PADW:.+]]  = "tosa.pad"(%arg1, %[[PADV]])
-  // CHECK-DAG: %[[RESW1:.+]]  = "tosa.reshape"(%[[PADW]]) {new_shape = array<i64: 5, 2, 2, 2, 3, 3>}
+  // CHECK-DAG: %[[RESW1:.+]]  = "tosa.reshape"(%[[PADW]]) <{new_shape = array<i64: 5, 2, 2, 2, 3, 3>}
   // CHECK-DAG: %[[TRANS:.+]]  = "tosa.transpose"(%[[RESW1]], %[[TRANSV]])
-  // CHECK-DAG: %[[RESW2:.+]]  = "tosa.reshape"(%[[TRANS]]) {new_shape = array<i64: 30, 2, 2, 3>}
-  // CHECK-DAG: %[[REV1:.+]]  = "tosa.reverse"(%[[RESW2]]) {axis = 1 : i64}
-  // CHECK-DAG: %[[NEWWEIGHT:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
+  // CHECK-DAG: %[[RESW2:.+]]  = "tosa.reshape"(%[[TRANS]]) <{new_shape = array<i64: 30, 2, 2, 3>}
+  // CHECK-DAG: %[[REV1:.+]]  = "tosa.reverse"(%[[RESW2]]) <{axis = 1 : i64}
+  // CHECK-DAG: %[[NEWWEIGHT:.+]] = "tosa.reverse"(%[[REV1]]) <{axis = 2 : i64}
 
   // Pad out the input matrix to handle the transpose conv.
-  // CHECK-DAG: %[[PAD:.+]]  = "tosa.const"() {value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi32>}
-  // CHECK-DAG: %[[TRANS2:.+]]  = "tosa.const"() {value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
+  // CHECK-DAG: %[[PAD:.+]]  = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi32>}
+  // CHECK-DAG: %[[TRANS2:.+]]  = "tosa.const"() <{value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
   // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0, %[[PAD]])
 
   // Manipulate the final shape.
-  // CHECK-DAG: %[[BIAS:.+]]  = "tosa.const"() {value = dense<0.000000e+00> : tensor<30xf32>}
-  // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) {dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>}
-  // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) {new_shape = array<i64: 2, 18, 16, 2, 3, 5>}
+  // CHECK-DAG: %[[BIAS:.+]]  = "tosa.const"() <{value = dense<0.000000e+00> : tensor<30xf32>}
+  // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) <{dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>}
+  // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) <{new_shape = array<i64: 2, 18, 16, 2, 3, 5>}
   // CHECK-DAG: %[[TRANS_OUT:.+]] = "tosa.transpose"(%[[RESHAPE_OUT_1]], %[[TRANS2]])
-  // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) {new_shape = array<i64: 2, 36, 48, 5>}
-  // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) {size = array<i64: 2, 35, 47, 5>, start = array<i64: 0, 0, 0, 0>}
+  // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) <{new_shape = array<i64: 2, 36, 48, 5>}
+  // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) <{size = array<i64: 2, 35, 47, 5>, start = array<i64: 0, 0, 0, 0>}
   // CHECK: %[[ADD:.+]] = "tosa.add"(%[[SLICE]], %arg2)
   %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: -1, -1, -1, -1>, stride = array<i64: 2, 3>} : (tensor<2x17x15x3xf32>, tensor<5x3x5x3xf32>, tensor<5xf32>) -> tensor<2x35x47x5xf32>
   %1 = tensor.cast %0 : tensor<2x35x47x5xf32> to tensor<2x?x?x5xf32>
@@ -76,29 +76,29 @@ func.func @transpose_conv2d_strided(%arg0: tensor<2x17x15x3xf32>, %arg1: tensor<
 // CHECK-LABEL: @transpose_conv2d_strided_quantized
 func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1: tensor<5x3x5x3xi8>, %arg2: tensor<5xi32>) -> (tensor<2x35x47x5xi32>) {
   // Manipulate the weight matrix to handle striding.
-  // CHECK-DAG: %[[PADV:.+]]  = "tosa.const"() {value = dense<{{\[\[}}0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi32>}
-  // CHECK-DAG: %[[TRANSV:.+]]  = "tosa.const"() {value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>}
-  // CHECK-DAG: %[[PADW:.+]]  = "tosa.pad"(%arg1, %[[PADV]]) {quantization_info = #tosa.pad_quant<input_zp = 42>}
-  // CHECK-DAG: %[[RESW1:.+]]  = "tosa.reshape"(%[[PADW]]) {new_shape = array<i64: 5, 2, 2, 2, 3, 3>}
+  // CHECK-DAG: %[[PADV:.+]]  = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi32>}
+  // CHECK-DAG: %[[TRANSV:.+]]  = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>}
+  // CHECK-DAG: %[[PADW:.+]]  = "tosa.pad"(%arg1, %[[PADV]]) <{quantization_info = #tosa.pad_quant<input_zp = 42>}
+  // CHECK-DAG: %[[RESW1:.+]]  = "tosa.reshape"(%[[PADW]]) <{new_shape = array<i64: 5, 2, 2, 2, 3, 3>}
   // CHECK-DAG: %[[TRANS:.+]]  = "tosa.transpose"(%[[RESW1]], %[[TRANSV]])
-  // CHECK-DAG: %[[RESW2:.+]]  = "tosa.reshape"(%[[TRANS]]) {new_shape = array<i64: 30, 2, 2, 3>}
-  // CHECK-DAG: %[[REV1:.+]]  = "tosa.reverse"(%[[RESW2]]) {axis = 1 : i64}
-  // CHECK-DAG: %[[NEWWEIGHT:.+]] = "tosa.reverse"(%[[REV1]]) {axis = 2 : i64}
+  // CHECK-DAG: %[[RESW2:.+]]  = "tosa.reshape"(%[[TRANS]]) <{new_shape = array<i64: 30, 2, 2, 3>}
+  // CHECK-DAG: %[[REV1:.+]]  = "tosa.reverse"(%[[RESW2]]) <{axis = 1 : i64}
+  // CHECK-DAG: %[[NEWWEIGHT:.+]] = "tosa.reverse"(%[[REV1]]) <{axis = 2 : i64}
 
   // Pad out the input matrix to handle the transpose conv.
-  // CHECK-DAG: %[[PAD:.+]]  = "tosa.const"() {value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi32>}
-  // CHECK-DAG: %[[TRANS2:.+]]  = "tosa.const"() {value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
-  // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0, %[[PAD]]) {quantization_info = #tosa.pad_quant<input_zp = -22>}
+  // CHECK-DAG: %[[PAD:.+]]  = "tosa.const"() <{value = dense<{{\[\[}}0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi32>}
+  // CHECK-DAG: %[[TRANS2:.+]]  = "tosa.const"() <{value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
+  // CHECK-DAG: %[[NEWINPUT:.+]] = "tosa.pad"(%arg0, %[[PAD]]) <{quantization_info = #tosa.pad_quant<input_zp = -22>}
 
   // Manipulate the final shape.
-  // CHECK-DAG: %[[BIAS:.+]]  = "tosa.const"() {value = dense<0> : tensor<30xi32>}
-  // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) {dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = array<i64: 1, 1>}
-  // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) {new_shape = array<i64: 2, 18, 16, 2, 3, 5>}
+  // CHECK-DAG: %[[BIAS:.+]]  = "tosa.const"() <{value = dense<0> : tensor<30xi32>}
+  // CHECK-DAG: %[[CONV:.+]] = "tosa.conv2d"(%[[NEWINPUT]], %[[NEWWEIGHT]], %[[BIAS]]) <{dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, stride = array<i64: 1, 1>}
+  // CHECK-DAG: %[[RESHAPE_OUT_1:.+]] = "tosa.reshape"(%[[CONV]]) <{new_shape = array<i64: 2, 18, 16, 2, 3, 5>}
   // CHECK-DAG: %[[TRANS_OUT:.+]] = "tosa.transpose"(%[[RESHAPE_OUT_1]], %[[TRANS2]])
-  // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) {new_shape = array<i64: 2, 36, 48, 5>}
-  // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) {size = array<i64: 2, 35, 47, 5>, start = array<i64: 0, 0, 0, 0>}
+  // CHECK-DAG: %[[RESHAPE_OUT_2:.+]] = "tosa.reshape"(%[[TRANS_OUT]]) <{new_shape = array<i64: 2, 36, 48, 5>}
+  // CHECK-DAG: %[[SLICE:.+]] = "tosa.slice"(%[[RESHAPE_OUT_2]]) <{size = array<i64: 2, 35, 47, 5>, start = array<i64: 0, 0, 0, 0>}
   // CHECK: %[[ADD:.+]] = "tosa.add"(%[[SLICE]], %arg2)
-  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = array<i64: 0, 0, 0, 0>, quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = array<i64: -1, -1, -1, -1>, stride = array<i64: 2, 3>} : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>) -> tensor<2x35x47x5xi32>
+  %0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) <{out_pad = array<i64: 0, 0, 0, 0>, quantization_info = #tosa.conv_quant<input_zp = -22, weight_zp = 42>, out_shape = array<i64: -1, -1, -1, -1>, stride = array<i64: 2, 3>}> : (tensor<2x17x15x3xi8>, tensor<5x3x5x3xi8>, tensor<5xi32>) -> tensor<2x35x47x5xi32>
   return %0 : tensor<2x35x47x5xi32>
 }
 
@@ -108,24 +108,24 @@ func.func @transpose_conv2d_strided_quantized(%arg0: tensor<2x17x15x3xi8>, %arg1
 func.func @transpose_conv2d_strided_overpad(%arg0 : tensor<1x16x1x1xi8>, %arg1 : tensor<1x2x1x1xi8>, %arg2 : tensor<1xi32>) -> (tensor<1x19x2x1xi32>) {
   // CHECK: %[[WEIGHT_PAD:.+]] = "tosa.const"() 
   // CHECK-SAME{literal}: value = dense<[[0, 0], [0, 0], [0, 1], [0, 0]]> : tensor<4x2xi32>
-  // CHECK: %[[WEIGHT_PERMS:.+]] = "tosa.const"() {value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>} : () -> tensor<6xi32>
+  // CHECK: %[[WEIGHT_PERMS:.+]] = "tosa.const"() <{value = dense<[2, 4, 0, 1, 3, 5]> : tensor<6xi32>}
   // CHECK: %[[INPUT_PAD:.+]] = "tosa.const"() 
   // CHECK-SAME{literal}: value = dense<[[0, 0], [1, 1], [0, 0], [0, 0]]> : tensor<4x2xi32>}
-  // CHECK: %[[ZERO:.+]] = "tosa.const"() {value = dense<0> : tensor<2xi32>} : () -> tensor<2xi32>
-  // CHECK: %[[RESULT_PERMS:.+]] = "tosa.const"() {value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
+  // CHECK: %[[ZERO:.+]] = "tosa.const"() <{value = dense<0> : tensor<2xi32>}
+  // CHECK: %[[RESULT_PERMS:.+]] = "tosa.const"() <{value = dense<[0, 1, 3, 2, 4, 5]> : tensor<6xi32>}
   // CHECK: %[[RESULT_PAD:.+]] = "tosa.const"() 
   // CHECK-SAME{literal}: value = dense<[[0, 0], [2, 0], [0, 0], [0, 0]]> : tensor<4x2xi32>}
-  // CHECK: %[[PAD_WEIGHT:.+]] = "tosa.pad"(%arg1, %[[WEIGHT_PAD]]) {quantization_info = #tosa.pad_quant<input_zp = 93>}
-  // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = "tosa.reshape"(%[[PAD_WEIGHT]]) {new_shape = array<i64: 1, 2, 1, 1, 2, 1>}
+  // CHECK: %[[PAD_WEIGHT:.+]] = "tosa.pad"(%arg1, %[[WEIGHT_PAD]]) <{quantization_info = #tosa.pad_quant<input_zp = 93>}
+  // CHECK: %[[RESHAPE_WEIGHT_0:.+]] = "tosa.reshape"(%[[PAD_WEIGHT]]) <{new_shape = array<i64: 1, 2, 1, 1, 2, 1>}
   // CHECK: %[[TRANSPOSE_WEIGHT:.+]] = "tosa.transpose"(%[[RESHAPE_WEIGHT_0]], %[[WEIGHT_PERMS]])
-  // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = "tosa.reshape"(%[[TRANSPOSE_WEIGHT]]) {new_shape = array<i64: 2, 2, 1, 1>}
-  // CHECK: %[[REVERSE:.+]] = "tosa.reverse"(%[[RESHAPE_WEIGHT_1]]) {axis = 1 : i64}
-  // CHECK: %[[PAD_INPUT:.+]] = "tosa.pad"(%arg0, %[[INPUT_PAD]]) {quantization_info = #tosa.pad_quant<input_zp = -103>}
+  // CHECK: %[[RESHAPE_WEIGHT_1:.+]] = "tosa.reshape"(%[[TRANSPOSE_WEIGHT]]) <{new_shape = array<i64: 2, 2, 1, 1>}
+  // CHECK: %[[REVERSE:.+]] = "tosa.reverse"(%[[RESHAPE_WEIGHT_1]]) <{axis = 1 : i64}
+  // CHECK: %[[PAD_INPUT:.+]] = "tosa.pad"(%arg0, %[[INPUT_PAD]]) <{quantization_info = #tosa.pad_quant<input_zp = -103>}
   // CHECK: %[[CONV:.+]] = "tosa.conv2d"(%[[PAD_INPUT]], %[[REVERSE]], %[[ZERO]]) 
   // CHECK-SAME{literal}: dilation = [1, 1], pad = [0, 0, 0, 0], quantization_info = #tosa.conv_quant<input_zp = -103, weight_zp = 93>, stride = [1, 1]}
-  // CHECK: %[[RESHAPE_RESULT_0:.+]] = "tosa.reshape"(%[[CONV]]) {new_shape = array<i64: 1, 17, 1, 1, 2, 1>}
+  // CHECK: %[[RESHAPE_RESULT_0:.+]] = "tosa.reshape"(%[[CONV]]) <{new_shape = array<i64: 1, 17, 1, 1, 2, 1>}
   // CHECK: %[[TRANSPOSE_RESULT:.+]] = "tosa.transpose"(%[[RESHAPE_RESULT_0]], %[[RESULT_PERMS]])
-  // CHECK: %[[RESHAPE_RESULT_1:.+]] = "tosa.reshape"(%[[TRANSPOSE_RESULT]]) {new_shape = array<i64: 1, 17, 2, 1>}
+  // CHECK: %[[RESHAPE_RESULT_1:.+]] = "tosa.reshape"(%[[TRANSPOSE_RESULT]]) <{new_shape = array<i64: 1, 17, 2, 1>}
   // CHECK: %[[PAD_RESULT:.+]] = "tosa.pad"(%[[RESHAPE_RESULT_1]], %[[RESULT_PAD]])
   // CHECK: %[[ADD:.+]] = "tosa.add"(%[[PAD_RESULT]], %arg2)
   %2 =  "tosa.transpose_conv2d"(%arg0, %arg1, %arg2)  {

diff  --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
index 268eae90d5cd9..ea2bd42106a24 100644
--- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
@@ -51,7 +51,7 @@ func.func @test_unary_f32(%arg0 : tensor<4xf32>) -> () {
   // CHECK: "tosa.reciprocal"(%arg0) : (tensor<4xf32>) -> tensor<4xf32>
   %7 = "tosa.reciprocal"(%arg0) : (tensor<4xf32>) -> tensor<*xf32>
 
-  // CHECK: "tosa.reverse"(%arg0) {axis = 0 : i64} : (tensor<4xf32>) -> tensor<4xf32>
+  // CHECK: "tosa.reverse"(%arg0) <{axis = 0 : i64}> : (tensor<4xf32>) -> tensor<4xf32>
   %8 = "tosa.reverse"(%arg0) { axis = 0 : i64 } : (tensor<4xf32>) -> tensor<?xf32>
 
   // CHECK: "tosa.rsqrt"(%arg0) : (tensor<4xf32>) -> tensor<4xf32>
@@ -87,7 +87,7 @@ func.func @test_unary_i32(%arg0 : tensor<4xi32>) -> () {
   // CHECK: "tosa.negate"(%arg0) : (tensor<4xi32>) -> tensor<4xi32>
   %4 = "tosa.negate"(%arg0) : (tensor<4xi32>) -> tensor<*xi32>
 
-  // CHECK: "tosa.reverse"(%arg0) {axis = 0 : i64} : (tensor<4xi32>) -> tensor<4xi32>
+  // CHECK: "tosa.reverse"(%arg0) <{axis = 0 : i64}> : (tensor<4xi32>) -> tensor<4xi32>
   %5 = "tosa.reverse"(%arg0) { axis = 0 : i64 } : (tensor<4xi32>) -> tensor<?xi32>
 
   // CHECK: "tosa.rescale"(%arg0) {{.+}} : (tensor<4xi32>) -> tensor<4xi16>
@@ -111,7 +111,7 @@ func.func @test_binary_scalar_f32(%arg0 : tensor<4xf32>, %arg1 : tensor<f32>) ->
   // CHECK: "tosa.minimum"(%arg0, %arg1) : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
   %2 = "tosa.minimum"(%arg0, %arg1) : (tensor<4xf32>, tensor<f32>) -> tensor<*xf32>
 
-  // CHECK: "tosa.mul"(%arg0, %arg1) {shift = 0 : i32} : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
+  // CHECK: "tosa.mul"(%arg0, %arg1) <{shift = 0 : i32}> : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
   %3 = "tosa.mul"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xf32>, tensor<f32>) -> tensor<*xf32>
 
   // CHECK: "tosa.pow"(%arg0, %arg1) : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
@@ -145,7 +145,7 @@ func.func @test_binary_broadcast_f32(%arg0 : tensor<4xf32>, %arg1 : tensor<1xf32
   // CHECK: "tosa.minimum"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32>
   %2 = "tosa.minimum"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32>
 
-  // CHECK: "tosa.mul"(%arg0, %arg1) {shift = 0 : i32} : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32>
+  // CHECK: "tosa.mul"(%arg0, %arg1) <{shift = 0 : i32}> : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32>
   %3 = "tosa.mul"(%arg0, %arg1) { shift = 0 : i32 } : (tensor<4xf32>, tensor<1xf32>) -> tensor<*xf32>
 
   // CHECK: "tosa.pow"(%arg0, %arg1) : (tensor<4xf32>, tensor<1xf32>) -> tensor<4xf32>
@@ -173,14 +173,14 @@ func.func @test_binary_i32(%arg0 : tensor<4xi32>, %arg1 : tensor<i32>) -> () {
   // CHECK: "tosa.add"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
   %0 = "tosa.add"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
 
-  // CHECK: "tosa.bitwise_and"(%arg0, %arg1) {shift = 0 : i32} : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
-  %1 = "tosa.bitwise_and"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
+  // CHECK: "tosa.bitwise_and"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
+  %1 = "tosa.bitwise_and"(%arg0, %arg1): (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
 
-  // CHECK: "tosa.bitwise_or"(%arg0, %arg1) {shift = 0 : i32} : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
-  %2 = "tosa.bitwise_or"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
+  // CHECK: "tosa.bitwise_or"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
+  %2 = "tosa.bitwise_or"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
 
-  // CHECK: "tosa.bitwise_xor"(%arg0, %arg1) {shift = 0 : i32} : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
-  %3 = "tosa.bitwise_xor"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
+  // CHECK: "tosa.bitwise_xor"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
+  %3 = "tosa.bitwise_xor"(%arg0, %arg1): (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
 
   // CHECK: "tosa.equal"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<4xi1>
   %4 = "tosa.equal"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<*xi1>
@@ -203,7 +203,7 @@ func.func @test_binary_i32(%arg0 : tensor<4xi32>, %arg1 : tensor<i32>) -> () {
   // CHECK: "tosa.minimum"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
   %10 = "tosa.minimum"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
 
-  // CHECK: "tosa.mul"(%arg0, %arg1) {shift = 0 : i32} : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
+  // CHECK: "tosa.mul"(%arg0, %arg1) <{shift = 0 : i32}> : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
   %11 = "tosa.mul"(%arg0, %arg1) { shift = 0 : i32 }: (tensor<4xi32>, tensor<i32>) -> tensor<*xi32>
 
   // CHECK: "tosa.pow"(%arg0, %arg1) : (tensor<4xi32>, tensor<i32>) -> tensor<4xi32>
@@ -245,10 +245,10 @@ func.func @test_select_i32(%arg0 : tensor<4xi1>, %arg1 : tensor<i32>, %arg2 : te
 
 // CHECK-LABEL: @test_static_argmax
 func.func @test_static_argmax(%arg0 : tensor<2x3xi32>) -> () {
-  // CHECK: "tosa.argmax"(%arg0) {axis = 0 : i64} : (tensor<2x3xi32>) -> tensor<3xi32>
+  // CHECK: "tosa.argmax"(%arg0) <{axis = 0 : i64}> : (tensor<2x3xi32>) -> tensor<3xi32>
   %0 = "tosa.argmax"(%arg0) {axis = 0 : i64} : (tensor<2x3xi32>) -> tensor<?xi32>
 
-  // CHECK: "tosa.argmax"(%arg0) {axis = 1 : i64} : (tensor<2x3xi32>) -> tensor<2xi32>
+  // CHECK: "tosa.argmax"(%arg0) <{axis = 1 : i64}> : (tensor<2x3xi32>) -> tensor<2xi32>
   %1 = "tosa.argmax"(%arg0) {axis = 1 : i64} : (tensor<2x3xi32>) -> tensor<?xi32>
   return
 }
@@ -257,10 +257,10 @@ func.func @test_static_argmax(%arg0 : tensor<2x3xi32>) -> () {
 
 // CHECK-LABEL: @test_dynamic_argmax
 func.func @test_dynamic_argmax(%arg0 : tensor<2x?xi32>) -> () {
-  // CHECK: "tosa.argmax"(%arg0) {axis = 0 : i64} : (tensor<2x?xi32>) -> tensor<?xi32>
+  // CHECK: "tosa.argmax"(%arg0) <{axis = 0 : i64}> : (tensor<2x?xi32>) -> tensor<?xi32>
   %0 = "tosa.argmax"(%arg0) {axis = 0 : i64} : (tensor<2x?xi32>) -> tensor<?xi32>
 
-  // CHECK: "tosa.argmax"(%arg0) {axis = 1 : i64} : (tensor<2x?xi32>) -> tensor<2xi32>
+  // CHECK: "tosa.argmax"(%arg0) <{axis = 1 : i64}> : (tensor<2x?xi32>) -> tensor<2xi32>
   %1 = "tosa.argmax"(%arg0) {axis = 1 : i64} : (tensor<2x?xi32>) -> tensor<?xi32>
   return
 }
@@ -372,13 +372,13 @@ func.func @test_table_dynamic(%arg0 : tensor<4x?xi16>, %arg1 : tensor<513xi16>)
 
 // CHECK-LABEL: @test_static_reshape
 func.func @test_static_reshape(%arg0 : tensor<4x4xi32>) -> () {
-  // CHECK: "tosa.reshape"(%arg0) {new_shape = array<i64: 16>} : (tensor<4x4xi32>) -> tensor<16xi32>
+  // CHECK: "tosa.reshape"(%arg0) <{new_shape = array<i64: 16>}> : (tensor<4x4xi32>) -> tensor<16xi32>
   %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 16>} : (tensor<4x4xi32>)  -> tensor<?xi32>
 
-  // CHECK: "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<4x4xi32>) -> tensor<16xi32>
+  // CHECK: "tosa.reshape"(%arg0) <{new_shape = array<i64: -1>}> : (tensor<4x4xi32>) -> tensor<16xi32>
   %1 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<4x4xi32>)  -> tensor<?xi32>
 
-  // CHECK: "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1>} : (tensor<4x4xi32>) -> tensor<2x8xi32>
+  // CHECK: "tosa.reshape"(%arg0) <{new_shape = array<i64: 2, -1>}> : (tensor<4x4xi32>) -> tensor<2x8xi32>
   %2 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1>} : (tensor<4x4xi32>)  -> tensor<?x?xi32>
 
   return
@@ -387,13 +387,13 @@ func.func @test_static_reshape(%arg0 : tensor<4x4xi32>) -> () {
 
 // CHECK-LABEL: @test_dynamic_reshape
 func.func @test_dynamic_reshape(%arg0 : tensor<4x?xi32>) -> () {
-  // CHECK: %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 16>} : (tensor<4x?xi32>) -> tensor<16xi32>
+  // CHECK: %0 = "tosa.reshape"(%arg0) <{new_shape = array<i64: 16>}> : (tensor<4x?xi32>) -> tensor<16xi32>
   %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 16>} : (tensor<4x?xi32>)  -> tensor<?xi32>
 
-  // CHECK: %1 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<4x?xi32>) -> tensor<?xi32>
+  // CHECK: %1 = "tosa.reshape"(%arg0) <{new_shape = array<i64: -1>}> : (tensor<4x?xi32>) -> tensor<?xi32>
   %1 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<4x?xi32>)  -> tensor<?xi32>
 
-  // CHECK: %2 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1>} : (tensor<4x?xi32>) -> tensor<2x?xi32>
+  // CHECK: %2 = "tosa.reshape"(%arg0) <{new_shape = array<i64: 2, -1>}> : (tensor<4x?xi32>) -> tensor<2x?xi32>
   %2 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1>} : (tensor<4x?xi32>)  -> tensor<?x?xi32>
 
   return
@@ -403,19 +403,19 @@ func.func @test_dynamic_reshape(%arg0 : tensor<4x?xi32>) -> () {
 
 // CHECK: @test_reduce_binary
 func.func @test_reduce_binary(%arg0 : tensor<2x3x?x?xi1>) -> () {
-  // CHECK: "tosa.reduce_all"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor<1x3x?x?xi1>
+  // CHECK: "tosa.reduce_all"(%arg0) <{axis = 0 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<1x3x?x?xi1>
   %0 = "tosa.reduce_all"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor<?x?x?x?xi1>
 
-  // CHECK: "tosa.reduce_all"(%arg0) {axis = 1 : i64} : (tensor<2x3x?x?xi1>) -> tensor<2x1x?x?xi1>
+  // CHECK: "tosa.reduce_all"(%arg0) <{axis = 1 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<2x1x?x?xi1>
   %1 = "tosa.reduce_all"(%arg0) {axis = 1 : i64} : (tensor<2x3x?x?xi1>) -> tensor<?x?x?x?xi1>
 
-  // CHECK: "tosa.reduce_all"(%arg0) {axis = 2 : i64} : (tensor<2x3x?x?xi1>) -> tensor<2x3x1x?xi1>
+  // CHECK: "tosa.reduce_all"(%arg0) <{axis = 2 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<2x3x1x?xi1>
   %2 = "tosa.reduce_all"(%arg0) {axis = 2 : i64} : (tensor<2x3x?x?xi1>) -> tensor<?x?x?x?xi1>
 
-  // CHECK: "tosa.reduce_all"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xi1>) -> tensor<2x3x?x1xi1>
+  // CHECK: "tosa.reduce_all"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<2x3x?x1xi1>
   %3 = "tosa.reduce_all"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xi1>) -> tensor<?x?x?x?xi1>
 
-  // CHECK: "tosa.reduce_any"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor<1x3x?x?xi1>
+  // CHECK: "tosa.reduce_any"(%arg0) <{axis = 0 : i64}> : (tensor<2x3x?x?xi1>) -> tensor<1x3x?x?xi1>
   %4 = "tosa.reduce_any"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xi1>) -> tensor<?x?x?x?xi1>
 
   return
@@ -425,25 +425,25 @@ func.func @test_reduce_binary(%arg0 : tensor<2x3x?x?xi1>) -> () {
 
 // CHECK: @test_reduce_float
 func.func @test_reduce_float(%arg0 : tensor<2x3x?x?xf32>) -> () {
-  // CHECK: "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xf32>) -> tensor<1x3x?x?xf32>
+  // CHECK: "tosa.reduce_sum"(%arg0) <{axis = 0 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<1x3x?x?xf32>
   %0 = "tosa.reduce_sum"(%arg0) {axis = 0 : i64} : (tensor<2x3x?x?xf32>) -> tensor<?x?x?x?xf32>
 
-  // CHECK: "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x1x?x?xf32>
+  // CHECK: "tosa.reduce_sum"(%arg0) <{axis = 1 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x1x?x?xf32>
   %1 = "tosa.reduce_sum"(%arg0) {axis = 1 : i64} : (tensor<2x3x?x?xf32>) -> tensor<?x?x?x?xf32>
 
-  // CHECK: "tosa.reduce_sum"(%arg0) {axis = 2 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x1x?xf32>
+  // CHECK: "tosa.reduce_sum"(%arg0) <{axis = 2 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x1x?xf32>
   %2 = "tosa.reduce_sum"(%arg0) {axis = 2 : i64} : (tensor<2x3x?x?xf32>) -> tensor<?x?x?x?xf32>
 
-  // CHECK: "tosa.reduce_sum"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32>
+  // CHECK: "tosa.reduce_sum"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32>
   %3 = "tosa.reduce_sum"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<?x?x?x?xf32>
 
-  // CHECK: "tosa.reduce_max"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32>
+  // CHECK: "tosa.reduce_max"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32>
   %4 = "tosa.reduce_max"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<?x?x?x?xf32>
 
-  // CHECK: "tosa.reduce_min"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32>
+  // CHECK: "tosa.reduce_min"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32>
   %5 = "tosa.reduce_min"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<?x?x?x?xf32>
 
-  // CHECK: "tosa.reduce_prod"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32>
+  // CHECK: "tosa.reduce_prod"(%arg0) <{axis = 3 : i64}> : (tensor<2x3x?x?xf32>) -> tensor<2x3x?x1xf32>
   %6 = "tosa.reduce_prod"(%arg0) {axis = 3 : i64} : (tensor<2x3x?x?xf32>) -> tensor<?x?x?x?xf32>
 
   return
@@ -453,7 +453,7 @@ func.func @test_reduce_float(%arg0 : tensor<2x3x?x?xf32>) -> () {
 
 // CHECK-LABEL: @test_concat
 func.func @test_concat(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x2xf32>) -> () {
-  // CHECK: "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor<3x2xf32>
+  // CHECK: "tosa.concat"(%arg0, %arg1) <{axis = 0 : i64}> : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor<3x2xf32>
   %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x2xf32>) -> tensor<?x?xf32>
 
   return
@@ -463,7 +463,7 @@ func.func @test_concat(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x2xf32>) -> () {
 
 // CHECK-LABEL: @test_concat_dynamic
 func.func @test_concat_dynamic(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x?xf32>) -> () {
-  // CHECK: "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x?xf32>) -> tensor<3x2xf32>
+  // CHECK: "tosa.concat"(%arg0, %arg1) <{axis = 0 : i64}> : (tensor<1x2xf32>, tensor<2x?xf32>) -> tensor<3x2xf32>
   %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<1x2xf32>, tensor<2x?xf32>) -> tensor<?x?xf32>
 
   return
@@ -473,7 +473,7 @@ func.func @test_concat_dynamic(%arg0 : tensor<1x2xf32>, %arg1 : tensor<2x?xf32>)
 
 // CHECK-LABEL: @test_concat_dynamic_axis
 func.func @test_concat_dynamic_axis(%arg0 : tensor<?x2xf32>, %arg1 : tensor<2x2xf32>) -> () {
-  // CHECK: "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
+  // CHECK: "tosa.concat"(%arg0, %arg1) <{axis = 0 : i64}> : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
   %0 = "tosa.concat"(%arg0, %arg1) {axis = 0 : i64} : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x?xf32>
 
   return
@@ -483,7 +483,7 @@ func.func @test_concat_dynamic_axis(%arg0 : tensor<?x2xf32>, %arg1 : tensor<2x2x
 
 // CHECK-LABEL: @test_concat_axis_1
 func.func @test_concat_axis_1(%arg0 : tensor<2x1xf32>, %arg1 : tensor<2x2xf32>) -> () {
-  // CHECK: "tosa.concat"(%arg0, %arg1) {axis = 1 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor<2x3xf32>
+  // CHECK: "tosa.concat"(%arg0, %arg1) <{axis = 1 : i64}> : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor<2x3xf32>
   %0 = "tosa.concat"(%arg0, %arg1) {axis = 1 : i64} : (tensor<2x1xf32>, tensor<2x2xf32>) -> tensor<?x?xf32>
 
   return
@@ -522,7 +522,7 @@ func.func @test_padding_simple(%arg0 : tensor<1x2xf32>) -> () {
 
 // CHECK-LABEL: @test_slice
 func.func @test_slice(%arg0 : tensor<?xi32>) -> () {
-  // CHECK: "tosa.slice"(%arg0) {size = array<i64: 2>, start = array<i64: 1>} : (tensor<?xi32>) -> tensor<2xi32>
+  // CHECK: "tosa.slice"(%arg0) <{size = array<i64: 2>, start = array<i64: 1>}> : (tensor<?xi32>) -> tensor<2xi32>
   %0 = "tosa.slice"(%arg0) { size = array<i64: 2>, start = array<i64: 1> } : (tensor<?xi32>) -> tensor<?xi32>
   return
 }
@@ -531,7 +531,7 @@ func.func @test_slice(%arg0 : tensor<?xi32>) -> () {
 
 // CHECK-LABEL: @test_slice_dynamic
 func.func @test_slice_dynamic(%arg0 : tensor<10x?x2xf32>) -> () {
-  // CHECK: "tosa.slice"(%arg0) {size = array<i64: 7, -1, 1>, start = array<i64: 1, 0, 0>} : (tensor<10x?x2xf32>) -> tensor<7x?x1xf32>
+  // CHECK: "tosa.slice"(%arg0) <{size = array<i64: 7, -1, 1>, start = array<i64: 1, 0, 0>}> : (tensor<10x?x2xf32>) -> tensor<7x?x1xf32>
   %0 = "tosa.slice"(%arg0) {size = array<i64: 7, -1, 1>, start = array<i64: 1, 0, 0>} : (tensor<10x?x2xf32>) -> tensor<?x?x?xf32>
   return
 }
@@ -540,8 +540,8 @@ func.func @test_slice_dynamic(%arg0 : tensor<10x?x2xf32>) -> () {
 
 // CHECK-LABEL: @test_tile
 func.func @test_tile(%arg0 : tensor<2x3x?xi32>) -> () {
-  // CHECK: "tosa.tile"(%arg0) {multiples = array<i64: 2, 1, 5>} : (tensor<2x3x?xi32>) -> tensor<4x3x?xi32>
-  %0 = "tosa.tile"(%arg0) {multiples = array<i64: 2, 1, 5>} : (tensor<2x3x?xi32>)  -> (tensor<?x?x?xi32>)
+  // CHECK: "tosa.tile"(%arg0) <{multiples = array<i64: 2, 1, 5>}> : (tensor<2x3x?xi32>) -> tensor<4x3x?xi32>
+  %0 = "tosa.tile"(%arg0) <{multiples = array<i64: 2, 1, 5>}> : (tensor<2x3x?xi32>)  -> (tensor<?x?x?xi32>)
   return
 }
 

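Note on the CHECK-line churn in the test diffs above: once a dialect's inherent attributes are stored as properties, the generic printer emits them in an inline <{...}> dictionary rather than the trailing discardable attribute dictionary, which is why every {attr = ...} pattern in these FileCheck tests becomes <{attr = ...}>. A minimal sketch of the round trip (hypothetical example, not taken from the patch, assuming mlir-opt with the TOSA dialect registered):

  // Input still parses with the old attribute-dictionary syntax:
  func.func @example(%arg0: tensor<4xf32>) -> tensor<4xf32> {
    %0 = "tosa.reverse"(%arg0) {axis = 0 : i64} : (tensor<4xf32>) -> tensor<4xf32>
    return %0 : tensor<4xf32>
  }
  // Once axis is stored as a property, the generic form re-prints as:
  //   %0 = "tosa.reverse"(%arg0) <{axis = 0 : i64}> : (tensor<4xf32>) -> tensor<4xf32>

Because most CHECK patterns only match a prefix of the printed line, the updated tests typically add the opening <{ and drop the closing }> rather than spelling out the full dictionary.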

        

