[Mlir-commits] [mlir] 4df5310 - [mlir][spirv] Use assemblyFormat to define groupNonUniform op assembly (#115662)

llvmlistbot at llvm.org
Wed Nov 13 08:33:39 PST 2024


Author: Yadong Chen
Date: 2024-11-13T11:33:33-05:00
New Revision: 4df5310ffc82c0382f508d969e19521200ab357b

URL: https://github.com/llvm/llvm-project/commit/4df5310ffc82c0382f508d969e19521200ab357b
DIFF: https://github.com/llvm/llvm-project/commit/4df5310ffc82c0382f508d969e19521200ab357b.diff

LOG: [mlir][spirv] Use assemblyFormat to define groupNonUniform op assembly (#115662)

Declarative assemblyFormat in ODS is more concise and requires less
boilerplate than hand-writing the C++ parse/print methods.
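
For illustration, here is the declarative format the group non-uniform
arithmetic ops adopt, reflowed from the `SPIRV_GroupNonUniformArithmeticOp`
change in the diff below:

```tablegen
// TableGen derives parse()/print() from this declaration, replacing the
// custom C++ implementations removed from GroupOps.cpp.
let assemblyFormat = [{
  $execution_scope $group_operation $value
  (`cluster_size``(` $cluster_size^ `)`)? attr-dict
  `:` type($value) (`,` type($cluster_size)^)? `->` type(results)
}];
```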

Changes:
* Updates the ops defined in `SPIRVNonUniformOps.td` and
  `SPIRVGroupOps.td` to use assemblyFormat.
* Removes the custom print/parse implementations from `GroupOps.cpp` and
  `SPIRVOps.cpp`; these are now generated from assemblyFormat.
* Updates tests to the new format (largely using `<operand>` in place of
  `"operand"` and spelling out full type information; see the
  before/after sketch below).

Issue: #73359

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
    mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
    mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
    mlir/test/Conversion/ConvertToSPIRV/gpu.mlir
    mlir/test/Conversion/GPUToSPIRV/reductions.mlir
    mlir/test/Dialect/SPIRV/IR/group-ops.mlir
    mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
    mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
    mlir/test/Target/SPIRV/debug.mlir
    mlir/test/Target/SPIRV/group-ops.mlir
    mlir/test/Target/SPIRV/non-uniform-ops.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
index dd25fbbce14b9a..a8743b196bfe77 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
@@ -661,6 +661,12 @@ def SPIRV_INTELSubgroupBlockReadOp : SPIRV_IntelVendorOp<"SubgroupBlockRead", []
   let results = (outs
     SPIRV_Type:$value
   );
+
+  let hasCustomAssemblyFormat = 0;
+
+  let assemblyFormat = [{
+    $ptr attr-dict `:` type($ptr) `->` type($value)
+  }];
 }
 
 // -----

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
index a32f625ae82112..a1b866387e2ec0 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
@@ -26,7 +26,13 @@ class SPIRV_GroupNonUniformArithmeticOp<string mnemonic, Type type,
 
   let results = (outs
     SPIRV_ScalarOrVectorOf<type>:$result
-  );
+  );
+
+  let hasCustomAssemblyFormat = 0;
+
+  let assemblyFormat = [{
+    $execution_scope $group_operation $value (`cluster_size``(` $cluster_size^ `)`)? attr-dict `:` type($value) (`,` type($cluster_size)^)? `->` type(results)
+  }];
 }
 
 // -----
@@ -318,24 +324,14 @@ def SPIRV_GroupNonUniformFAddOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    float-scalar-vector-type ::= float-type |
-                                 `vector<` integer-literal `x` float-type `>`
-    non-uniform-fadd-op ::= ssa-id `=` `spirv.GroupNonUniformFAdd` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` float-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %scalar : f32
-    %1 = spirv.GroupNonUniformFAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %scalar : f32 -> f32
+    %1 = spirv.GroupNonUniformFAdd <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
     ```
   }];
 
@@ -378,24 +374,14 @@ def SPIRV_GroupNonUniformFMaxOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    float-scalar-vector-type ::= float-type |
-                                 `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmax-op ::= ssa-id `=` `spirv.GroupNonUniformFMax` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` float-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %scalar : f32
-    %1 = spirv.GroupNonUniformFMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %scalar : f32 -> f32
+    %1 = spirv.GroupNonUniformFMax <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
     ```
   }];
 
@@ -438,24 +424,14 @@ def SPIRV_GroupNonUniformFMinOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    float-scalar-vector-type ::= float-type |
-                                 `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmin-op ::= ssa-id `=` `spirv.GroupNonUniformFMin` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` float-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %scalar : f32
-    %1 = spirv.GroupNonUniformFMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %scalar : f32 -> f32
+    %1 = spirv.GroupNonUniformFMin <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
     ```
   }];
 
@@ -495,24 +471,14 @@ def SPIRV_GroupNonUniformFMulOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    float-scalar-vector-type ::= float-type |
-                                 `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmul-op ::= ssa-id `=` `spirv.GroupNonUniformFMul` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` float-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %scalar : f32
-    %1 = spirv.GroupNonUniformFMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %scalar : f32 -> f32
+    %1 = spirv.GroupNonUniformFMul <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
     ```
   }];
 
@@ -550,24 +516,14 @@ def SPIRV_GroupNonUniformIAddOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-iadd-op ::= ssa-id `=` `spirv.GroupNonUniformIAdd` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformIAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformIAdd <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -605,24 +561,14 @@ def SPIRV_GroupNonUniformIMulOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-imul-op ::= ssa-id `=` `spirv.GroupNonUniformIMul` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformIMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformIMul <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -662,24 +608,14 @@ def SPIRV_GroupNonUniformSMaxOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-smax-op ::= ssa-id `=` `spirv.GroupNonUniformSMax` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformSMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformSMax <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -719,24 +655,14 @@ def SPIRV_GroupNonUniformSMinOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-smin-op ::= ssa-id `=` `spirv.GroupNonUniformSMin` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformSMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformSMin <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -992,24 +918,14 @@ def SPIRV_GroupNonUniformUMaxOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-umax-op ::= ssa-id `=` `spirv.GroupNonUniformUMax` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformUMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformUMax <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -1050,24 +966,14 @@ def SPIRV_GroupNonUniformUMinOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
 
     <!-- End of AutoGen section -->
 
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-umin-op ::= ssa-id `=` `spirv.GroupNonUniformUMin` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:
 
     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformUMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformUMin <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -1113,9 +1019,9 @@ def SPIRV_GroupNonUniformBitwiseAndOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformBitwiseAnd "Subgroup" "ClusteredReduce"
-           %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformBitwiseAnd <Subgroup> <ClusteredReduce>
+           %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -1163,9 +1069,9 @@ def SPIRV_GroupNonUniformBitwiseOrOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformBitwiseOr "Subgroup" "ClusteredReduce"
-           %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformBitwiseOr <Subgroup> <ClusteredReduce>
+           %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -1213,9 +1119,9 @@ def SPIRV_GroupNonUniformBitwiseXorOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformBitwiseXor "Subgroup" "ClusteredReduce"
-           %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformBitwiseXor <Subgroup> <ClusteredReduce>
+           %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
 
@@ -1263,9 +1169,9 @@ def SPIRV_GroupNonUniformLogicalAndOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i1
     %vector = ... : vector<4xi1>
-    %0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %scalar : i1
-    %1 = spirv.GroupNonUniformLogicalAnd "Subgroup" "ClusteredReduce"
-           %vector cluster_size(%four) : vector<4xi1>
+    %0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %scalar : i1 -> i1
+    %1 = spirv.GroupNonUniformLogicalAnd <Subgroup> <ClusteredReduce>
+           %vector cluster_size(%four) : vector<4xi1>, i32 -> vector<4xi1>
     ```
   }];
 
@@ -1313,9 +1219,9 @@ def SPIRV_GroupNonUniformLogicalOrOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i1
     %vector = ... : vector<4xi1>
-    %0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %scalar : i1
-    %1 = spirv.GroupNonUniformLogicalOr "Subgroup" "ClusteredReduce"
-           %vector cluster_size(%four) : vector<4xi1>
+    %0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %scalar : i1 -> i1
+    %1 = spirv.GroupNonUniformLogicalOr <Subgroup> <ClusteredReduce>
+           %vector cluster_size(%four) : vector<4xi1>, i32 -> vector<4xi1>
     ```
   }];
 
@@ -1363,9 +1269,9 @@ def SPIRV_GroupNonUniformLogicalXorOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i1
     %vector = ... : vector<4xi1>
-    %0 = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %scalar : i1
-    %1 = spirv.GroupNonUniformLogicalXor "Subgroup" "ClusteredReduce"
-           %vector cluster_size(%four) : vector<4xi>
+    %0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %scalar : i1 -> i1
+    %1 = spirv.GroupNonUniformLogicalXor <Subgroup> <ClusteredReduce>
+           %vector cluster_size(%four) : vector<4xi1>, i32 -> vector<4xi1>
     ```
   }];
 

diff --git a/mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp b/mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp
index 2e5a2aab52a160..8aeafda0eb755a 100644
--- a/mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp
@@ -20,70 +20,6 @@ using namespace mlir::spirv::AttrNames;
 
 namespace mlir::spirv {
 
-template <typename OpTy>
-static ParseResult parseGroupNonUniformArithmeticOp(OpAsmParser &parser,
-                                                    OperationState &state) {
-  spirv::Scope executionScope;
-  GroupOperation groupOperation;
-  OpAsmParser::UnresolvedOperand valueInfo;
-  if (spirv::parseEnumStrAttr<spirv::ScopeAttr>(
-          executionScope, parser, state,
-          OpTy::getExecutionScopeAttrName(state.name)) ||
-      spirv::parseEnumStrAttr<GroupOperationAttr>(
-          groupOperation, parser, state,
-          OpTy::getGroupOperationAttrName(state.name)) ||
-      parser.parseOperand(valueInfo))
-    return failure();
-
-  std::optional<OpAsmParser::UnresolvedOperand> clusterSizeInfo;
-  if (succeeded(parser.parseOptionalKeyword(kClusterSize))) {
-    clusterSizeInfo = OpAsmParser::UnresolvedOperand();
-    if (parser.parseLParen() || parser.parseOperand(*clusterSizeInfo) ||
-        parser.parseRParen())
-      return failure();
-  }
-
-  Type resultType;
-  if (parser.parseColonType(resultType))
-    return failure();
-
-  if (parser.resolveOperand(valueInfo, resultType, state.operands))
-    return failure();
-
-  if (clusterSizeInfo) {
-    Type i32Type = parser.getBuilder().getIntegerType(32);
-    if (parser.resolveOperand(*clusterSizeInfo, i32Type, state.operands))
-      return failure();
-  }
-
-  return parser.addTypeToList(resultType, state.types);
-}
-
-template <typename GroupNonUniformArithmeticOpTy>
-static void printGroupNonUniformArithmeticOp(Operation *groupOp,
-                                             OpAsmPrinter &printer) {
-  printer
-      << " \""
-      << stringifyScope(
-             groupOp
-                 ->getAttrOfType<spirv::ScopeAttr>(
-                     GroupNonUniformArithmeticOpTy::getExecutionScopeAttrName(
-                         groupOp->getName()))
-                 .getValue())
-      << "\" \""
-      << stringifyGroupOperation(
-             groupOp
-                 ->getAttrOfType<GroupOperationAttr>(
-                     GroupNonUniformArithmeticOpTy::getGroupOperationAttrName(
-                         groupOp->getName()))
-                 .getValue())
-      << "\" " << groupOp->getOperand(0);
-
-  if (groupOp->getNumOperands() > 1)
-    printer << " " << kClusterSize << '(' << groupOp->getOperand(1) << ')';
-  printer << " : " << groupOp->getResult(0).getType();
-}
-
 template <typename OpTy>
 static LogicalResult verifyGroupNonUniformArithmeticOp(Operation *groupOp) {
   spirv::Scope scope =
@@ -248,16 +184,6 @@ LogicalResult GroupNonUniformFAddOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformFAddOp>(*this);
 }
 
-ParseResult GroupNonUniformFAddOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformFAddOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformFAddOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformFAddOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformFMaxOp
 //===----------------------------------------------------------------------===//
@@ -266,16 +192,6 @@ LogicalResult GroupNonUniformFMaxOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformFMaxOp>(*this);
 }
 
-ParseResult GroupNonUniformFMaxOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformFMaxOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformFMaxOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformFMaxOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformFMinOp
 //===----------------------------------------------------------------------===//
@@ -284,16 +200,6 @@ LogicalResult GroupNonUniformFMinOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformFMinOp>(*this);
 }
 
-ParseResult GroupNonUniformFMinOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformFMinOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformFMinOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformFMinOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformFMulOp
 //===----------------------------------------------------------------------===//
@@ -302,16 +208,6 @@ LogicalResult GroupNonUniformFMulOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformFMulOp>(*this);
 }
 
-ParseResult GroupNonUniformFMulOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformFMulOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformFMulOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformFMulOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformIAddOp
 //===----------------------------------------------------------------------===//
@@ -320,16 +216,6 @@ LogicalResult GroupNonUniformIAddOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformIAddOp>(*this);
 }
 
-ParseResult GroupNonUniformIAddOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformIAddOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformIAddOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformIAddOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformIMulOp
 //===----------------------------------------------------------------------===//
@@ -338,16 +224,6 @@ LogicalResult GroupNonUniformIMulOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformIMulOp>(*this);
 }
 
-ParseResult GroupNonUniformIMulOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformIMulOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformIMulOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformIMulOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformSMaxOp
 //===----------------------------------------------------------------------===//
@@ -356,16 +232,6 @@ LogicalResult GroupNonUniformSMaxOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformSMaxOp>(*this);
 }
 
-ParseResult GroupNonUniformSMaxOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformSMaxOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformSMaxOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformSMaxOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformSMinOp
 //===----------------------------------------------------------------------===//
@@ -374,16 +240,6 @@ LogicalResult GroupNonUniformSMinOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformSMinOp>(*this);
 }
 
-ParseResult GroupNonUniformSMinOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformSMinOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformSMinOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformSMinOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformUMaxOp
 //===----------------------------------------------------------------------===//
@@ -392,16 +248,6 @@ LogicalResult GroupNonUniformUMaxOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformUMaxOp>(*this);
 }
 
-ParseResult GroupNonUniformUMaxOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformUMaxOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformUMaxOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformUMaxOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformUMinOp
 //===----------------------------------------------------------------------===//
@@ -410,16 +256,6 @@ LogicalResult GroupNonUniformUMinOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformUMinOp>(*this);
 }
 
-ParseResult GroupNonUniformUMinOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformUMinOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformUMinOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformUMinOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformBitwiseAnd
 //===----------------------------------------------------------------------===//
@@ -428,16 +264,6 @@ LogicalResult GroupNonUniformBitwiseAndOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformBitwiseAndOp>(*this);
 }
 
-ParseResult GroupNonUniformBitwiseAndOp::parse(OpAsmParser &parser,
-                                               OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformBitwiseAndOp>(parser,
-                                                                       result);
-}
-
-void GroupNonUniformBitwiseAndOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformBitwiseAndOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformBitwiseOr
 //===----------------------------------------------------------------------===//
@@ -446,16 +272,6 @@ LogicalResult GroupNonUniformBitwiseOrOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformBitwiseOrOp>(*this);
 }
 
-ParseResult GroupNonUniformBitwiseOrOp::parse(OpAsmParser &parser,
-                                              OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformBitwiseOrOp>(parser,
-                                                                      result);
-}
-
-void GroupNonUniformBitwiseOrOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformBitwiseOrOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformBitwiseXor
 //===----------------------------------------------------------------------===//
@@ -464,16 +280,6 @@ LogicalResult GroupNonUniformBitwiseXorOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformBitwiseXorOp>(*this);
 }
 
-ParseResult GroupNonUniformBitwiseXorOp::parse(OpAsmParser &parser,
-                                               OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformBitwiseXorOp>(parser,
-                                                                       result);
-}
-
-void GroupNonUniformBitwiseXorOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformBitwiseXorOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformLogicalAnd
 //===----------------------------------------------------------------------===//
@@ -482,16 +288,6 @@ LogicalResult GroupNonUniformLogicalAndOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformLogicalAndOp>(*this);
 }
 
-ParseResult GroupNonUniformLogicalAndOp::parse(OpAsmParser &parser,
-                                               OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformLogicalAndOp>(parser,
-                                                                       result);
-}
-
-void GroupNonUniformLogicalAndOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformLogicalAndOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformLogicalOr
 //===----------------------------------------------------------------------===//
@@ -500,16 +296,6 @@ LogicalResult GroupNonUniformLogicalOrOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformLogicalOrOp>(*this);
 }
 
-ParseResult GroupNonUniformLogicalOrOp::parse(OpAsmParser &parser,
-                                              OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformLogicalOrOp>(parser,
-                                                                      result);
-}
-
-void GroupNonUniformLogicalOrOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformLogicalOrOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformLogicalXor
 //===----------------------------------------------------------------------===//
@@ -518,16 +304,6 @@ LogicalResult GroupNonUniformLogicalXorOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformLogicalXorOp>(*this);
 }
 
-ParseResult GroupNonUniformLogicalXorOp::parse(OpAsmParser &parser,
-                                               OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformLogicalXorOp>(parser,
-                                                                       result);
-}
-
-void GroupNonUniformLogicalXorOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformLogicalXorOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // Group op verification
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index dd0a872e05dcbb..26559c1321db5e 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -1253,33 +1253,6 @@ LogicalResult spirv::GlobalVariableOp::verify() {
 // spirv.INTEL.SubgroupBlockRead
 //===----------------------------------------------------------------------===//
 
-ParseResult spirv::INTELSubgroupBlockReadOp::parse(OpAsmParser &parser,
-                                                   OperationState &result) {
-  // Parse the storage class specification
-  spirv::StorageClass storageClass;
-  OpAsmParser::UnresolvedOperand ptrInfo;
-  Type elementType;
-  if (parseEnumStrAttr(storageClass, parser) || parser.parseOperand(ptrInfo) ||
-      parser.parseColon() || parser.parseType(elementType)) {
-    return failure();
-  }
-
-  auto ptrType = spirv::PointerType::get(elementType, storageClass);
-  if (auto valVecTy = llvm::dyn_cast<VectorType>(elementType))
-    ptrType = spirv::PointerType::get(valVecTy.getElementType(), storageClass);
-
-  if (parser.resolveOperand(ptrInfo, ptrType, result.operands)) {
-    return failure();
-  }
-
-  result.addTypes(elementType);
-  return success();
-}
-
-void spirv::INTELSubgroupBlockReadOp::print(OpAsmPrinter &printer) {
-  printer << " " << getPtr() << " : " << getType();
-}
-
 LogicalResult spirv::INTELSubgroupBlockReadOp::verify() {
   if (failed(verifyBlockReadWritePtrAndValTypes(*this, getPtr(), getValue())))
     return failure();

diff --git a/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir b/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
index 5cd1fead2527b1..652f4472280869 100644
--- a/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
+++ b/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
@@ -68,7 +68,7 @@ module attributes {
         scf.yield %lane_res_next, %lane_max_next : i32, f32
       }
 
-      // CHECK: %[[SUBGROUP_MAX:.*]] = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %[[LANE_MAX]] : f32
+      // CHECK: %[[SUBGROUP_MAX:.*]] = spirv.GroupNonUniformFMax <Subgroup> <Reduce> %[[LANE_MAX]] : f32 -> f32
       // CHECK: %[[OEQ:.*]] = spirv.FOrdEqual %[[LANE_MAX]], %[[SUBGROUP_MAX]] : f32
       // CHECK: %[[BALLOT:.*]] = spirv.GroupNonUniformBallot <Subgroup> %[[OEQ]] : vector<4xi32>
       // CHECK: %[[BALLOTLSB:.*]] = spirv.GroupNonUniformBallotFindLSB <Subgroup> %[[BALLOT]] : vector<4xi32>, i32

diff --git a/mlir/test/Conversion/ConvertToSPIRV/gpu.mlir b/mlir/test/Conversion/ConvertToSPIRV/gpu.mlir
index f33a66bdf5effc..84f366e5874b03 100644
--- a/mlir/test/Conversion/ConvertToSPIRV/gpu.mlir
+++ b/mlir/test/Conversion/ConvertToSPIRV/gpu.mlir
@@ -8,7 +8,7 @@ module attributes {
 gpu.module @kernels {
   // CHECK-LABEL: spirv.func @all_reduce
   // CHECK-SAME: (%[[ARG0:.*]]: f32)
-  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %[[ARG0]] : f32
+  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %[[ARG0]] : f32 -> f32
   gpu.func @all_reduce(%arg0 : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
     %reduced = gpu.all_reduce add %arg0 {} : (f32) -> (f32)
@@ -28,7 +28,7 @@ module attributes {
 gpu.module @kernels {
   // CHECK-LABEL: spirv.func @subgroup_reduce
   // CHECK-SAME: (%[[ARG0:.*]]: f32)
-  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Subgroup" "Reduce" %[[ARG0]] : f32
+  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Subgroup> <Reduce> %[[ARG0]] : f32 -> f32
   gpu.func @subgroup_reduce(%arg0 : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
     %reduced = gpu.subgroup_reduce add %arg0 {} : (f32) -> (f32)

diff --git a/mlir/test/Conversion/GPUToSPIRV/reductions.mlir b/mlir/test/Conversion/GPUToSPIRV/reductions.mlir
index 44f85f68587f1a..ae834b9915d50c 100644
--- a/mlir/test/Conversion/GPUToSPIRV/reductions.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/reductions.mlir
@@ -30,7 +30,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.all_reduce add %arg {} : (f32) -> (f32)
     gpu.return
   }
@@ -70,7 +70,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
     %reduced = gpu.all_reduce add %arg {} : (i32) -> (i32)
     gpu.return
   }
@@ -110,7 +110,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Subgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Subgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.subgroup_reduce add %arg : (f32) -> (f32)
     gpu.return
   }
@@ -150,7 +150,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %reduced = gpu.subgroup_reduce add %arg : (i32) -> (i32)
     gpu.return
   }
@@ -190,7 +190,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.all_reduce mul %arg {} : (f32) -> (f32)
     gpu.return
   }
@@ -230,7 +230,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
     %reduced = gpu.all_reduce mul %arg {} : (i32) -> (i32)
     gpu.return
   }
@@ -270,7 +270,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul "Subgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul <Subgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.subgroup_reduce mul %arg : (f32) -> (f32)
     gpu.return
   }
@@ -310,7 +310,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %reduced = gpu.subgroup_reduce mul %arg : (i32) -> (i32)
     gpu.return
   }
@@ -350,7 +350,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.all_reduce minnumf %arg {} : (f32) -> (f32)
     gpu.return
   }
@@ -392,7 +392,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.all_reduce minsi %arg {} : (i32) -> (i32)
     %r1 = gpu.all_reduce minui %arg {} : (i32) -> (i32)
     gpu.return
@@ -433,7 +433,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Subgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin <Subgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.subgroup_reduce minnumf %arg : (f32) -> (f32)
     gpu.return
   }
@@ -475,8 +475,8 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformSMin "Subgroup" "Reduce" %[[ARG]] : i32
-    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformSMin <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.subgroup_reduce minsi %arg : (i32) -> (i32)
     %r1 = gpu.subgroup_reduce minui %arg : (i32) -> (i32)
     gpu.return
@@ -517,7 +517,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.all_reduce maxnumf %arg {} : (f32) -> (f32)
     gpu.return
   }
@@ -559,8 +559,8 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %[[ARG]] : i32
-    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.all_reduce maxsi %arg {} : (i32) -> (i32)
     %r1 = gpu.all_reduce maxui %arg {} : (i32) -> (i32)
     gpu.return
@@ -601,7 +601,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax <Subgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.subgroup_reduce maxnumf %arg : (f32) -> (f32)
     gpu.return
   }
@@ -643,8 +643,8 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Subgroup" "Reduce" %[[ARG]] : i32
-    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.subgroup_reduce maxsi %arg : (i32) -> (i32)
     %r1 = gpu.subgroup_reduce maxui %arg : (i32) -> (i32)
     gpu.return
@@ -665,7 +665,7 @@ gpu.module @kernels {
   //  CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : vector<1xi32>) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.subgroup_reduce maxsi %arg : (vector<1xi32>) -> (vector<1xi32>)
     gpu.return
   }

diff --git a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
index 741081a37d8a08..c879b901311f21 100644
--- a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
@@ -80,16 +80,16 @@ func.func @subgroup_ballot(%predicate: i1) -> vector<4xi32> {
 //===----------------------------------------------------------------------===//
 
 func.func @subgroup_block_read_intel(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 {
-  // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : i32
-  %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : i32
+  // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> i32
+  %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> i32
   return %0: i32
 }
 
 // -----
 
 func.func @subgroup_block_read_intel_vector(%ptr : !spirv.ptr<i32, StorageBuffer>) -> vector<3xi32> {
-  // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : vector<3xi32>
-  %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : vector<3xi32>
+  // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
+  %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
   return %0: vector<3xi32>
 }
 

diff --git a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
index d8a26c71d12f91..60ae1584d29fb9 100644
--- a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
@@ -150,16 +150,16 @@ func.func @group_non_uniform_elect() -> i1 {
 
 // CHECK-LABEL: @group_non_uniform_fadd_reduce
 func.func @group_non_uniform_fadd_reduce(%val: f32) -> f32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %{{.+}} : f32
-  %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %val : f32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+  %0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %val : f32 -> f32
   return %0: f32
 }
 
 // CHECK-LABEL: @group_non_uniform_fadd_clustered_reduce
 func.func @group_non_uniform_fadd_clustered_reduce(%val: vector<2xf32>) -> vector<2xf32> {
   %four = spirv.Constant 4 : i32
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xf32>
-  %0 = spirv.GroupNonUniformFAdd "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xf32>
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xf32>, i32 -> vector<2xf32>
+  %0 = spirv.GroupNonUniformFAdd <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xf32>, i32 -> vector<2xf32>
   return %0: vector<2xf32>
 }
 
@@ -169,16 +169,16 @@ func.func @group_non_uniform_fadd_clustered_reduce(%val: vector<2xf32>) -> vecto
 
 // CHECK-LABEL: @group_non_uniform_fmul_reduce
 func.func @group_non_uniform_fmul_reduce(%val: f32) -> f32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %{{.+}} : f32
-  %0 = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %val : f32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+  %0 = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %val : f32 -> f32
   return %0: f32
 }
 
 // CHECK-LABEL: @group_non_uniform_fmul_clustered_reduce
 func.func @group_non_uniform_fmul_clustered_reduce(%val: vector<2xf32>) -> vector<2xf32> {
   %four = spirv.Constant 4 : i32
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFMul "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xf32>
-  %0 = spirv.GroupNonUniformFMul "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xf32>
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFMul <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xf32>, i32 -> vector<2xf32>
+  %0 = spirv.GroupNonUniformFMul <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xf32>, i32 -> vector<2xf32>
   return %0: vector<2xf32>
 }
 
@@ -190,8 +190,8 @@ func.func @group_non_uniform_fmul_clustered_reduce(%val: vector<2xf32>) -> vecto
 
 // CHECK-LABEL: @group_non_uniform_fmax_reduce
 func.func @group_non_uniform_fmax_reduce(%val: f32) -> f32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %{{.+}} : f32
-  %0 = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %val : f32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+  %0 = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %val : f32 -> f32
   return %0: f32
 }
 
@@ -203,8 +203,8 @@ func.func @group_non_uniform_fmax_reduce(%val: f32) -> f32 {
 
 // CHECK-LABEL: @group_non_uniform_fmin_reduce
 func.func @group_non_uniform_fmin_reduce(%val: f32) -> f32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %{{.+}} : f32
-  %0 = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %val : f32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+  %0 = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %val : f32 -> f32
   return %0: f32
 }
 
@@ -216,16 +216,16 @@ func.func @group_non_uniform_fmin_reduce(%val: f32) -> f32 {
 
 // CHECK-LABEL: @group_non_uniform_iadd_reduce
 func.func @group_non_uniform_iadd_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
 // CHECK-LABEL: @group_non_uniform_iadd_clustered_reduce
 func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> {
   %four = spirv.Constant 4 : i32
-  // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xi32>
+  // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>, i32 -> vector<2xi32>
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>
   return %0: vector<2xi32>
 }
 
@@ -233,7 +233,7 @@ func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vecto
 
 func.func @group_non_uniform_iadd_reduce(%val: i32) -> i32 {
   // expected-error @+1 {{execution scope must be 'Workgroup' or 'Subgroup'}}
-  %0 = spirv.GroupNonUniformIAdd "Device" "Reduce" %val : i32
+  %0 = spirv.GroupNonUniformIAdd <Device> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -241,7 +241,7 @@ func.func @group_non_uniform_iadd_reduce(%val: i32) -> i32 {
 
 func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> {
   // expected-error @+1 {{cluster size operand must be provided for 'ClusteredReduce' group operation}}
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val : vector<2xi32>
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val : vector<2xi32> -> vector<2xi32>
   return %0: vector<2xi32>
 }
 
@@ -249,7 +249,7 @@ func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vecto
 
 func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>, %size: i32) -> vector<2xi32> {
   // expected-error @+1 {{cluster size operand must come from a constant op}}
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val cluster_size(%size) : vector<2xi32>
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%size) : vector<2xi32>, i32 -> vector<2xi32>
   return %0: vector<2xi32>
 }
 
@@ -258,7 +258,7 @@ func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>, %size: i
 func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> {
   %five = spirv.Constant 5 : i32
   // expected-error @+1 {{cluster size operand must be a power of two}}
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val cluster_size(%five) : vector<2xi32>
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%five) : vector<2xi32>, i32 -> vector<2xi32>
   return %0: vector<2xi32>
 }
 
@@ -270,16 +270,16 @@ func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vecto
 
 // CHECK-LABEL: @group_non_uniform_imul_reduce
 func.func @group_non_uniform_imul_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
 // CHECK-LABEL: @group_non_uniform_imul_clustered_reduce
 func.func @group_non_uniform_imul_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> {
   %four = spirv.Constant 4 : i32
-  // CHECK: %{{.+}} = spirv.GroupNonUniformIMul "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>
-  %0 = spirv.GroupNonUniformIMul "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xi32>
+  // CHECK: %{{.+}} = spirv.GroupNonUniformIMul <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>, i32 -> vector<2xi32>
+  %0 = spirv.GroupNonUniformIMul <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>
   return %0: vector<2xi32>
 }
 
@@ -291,8 +291,8 @@ func.func @group_non_uniform_imul_clustered_reduce(%val: vector<2xi32>) -> vecto
 
 // CHECK-LABEL: @group_non_uniform_smax_reduce
 func.func @group_non_uniform_smax_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -304,8 +304,8 @@ func.func @group_non_uniform_smax_reduce(%val: i32) -> i32 {
 
 // CHECK-LABEL: @group_non_uniform_smin_reduce
 func.func @group_non_uniform_smin_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -461,8 +461,8 @@ func.func @group_non_uniform_shuffle(%val: vector<2xf32>, %id: si32) -> vector<2
 
 // CHECK-LABEL: @group_non_uniform_umax_reduce
 func.func @group_non_uniform_umax_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -474,8 +474,8 @@ func.func @group_non_uniform_umax_reduce(%val: i32) -> i32 {
 
 // CHECK-LABEL: @group_non_uniform_umin_reduce
 func.func @group_non_uniform_umin_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -487,8 +487,8 @@ func.func @group_non_uniform_umin_reduce(%val: i32) -> i32 {
 
 // CHECK-LABEL: @group_non_uniform_bitwise_and
 func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -496,7 +496,7 @@ func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {
 
 func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
-  %0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %val : i1
+  %0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }
 
@@ -508,8 +508,8 @@ func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 {
 
 // CHECK-LABEL: @group_non_uniform_bitwise_or
 func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -517,7 +517,7 @@ func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {
 
 func.func @group_non_uniform_bitwise_or(%val: i1) -> i1 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
-  %0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %val : i1
+  %0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }
 
@@ -529,8 +529,8 @@ func.func @group_non_uniform_bitwise_or(%val: i1) -> i1 {
 
 // CHECK-LABEL: @group_non_uniform_bitwise_xor
 func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -538,7 +538,7 @@ func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {
 
 func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
-  %0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %val : i1
+  %0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }
 
@@ -550,8 +550,8 @@ func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 {
 
 // CHECK-LABEL: @group_non_uniform_logical_and
 func.func @group_non_uniform_logical_and(%val: i1) -> i1 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %{{.+}} : i1
-  %0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %val : i1
+  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %{{.+}} : i1 -> i1
+  %0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }
 
@@ -559,7 +559,7 @@ func.func @group_non_uniform_logical_and(%val: i1) -> i1 {
 
 func.func @group_non_uniform_logical_and(%val: i32) -> i32 {
   // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
-  %0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %val : i32
+  %0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -571,8 +571,8 @@ func.func @group_non_uniform_logical_and(%val: i32) -> i32 {
 
 // CHECK-LABEL: @group_non_uniform_logical_or
 func.func @group_non_uniform_logical_or(%val: i1) -> i1 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %{{.+}} : i1
-  %0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %val : i1
+  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %{{.+}} : i1 -> i1
+  %0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }
 
@@ -580,7 +580,7 @@ func.func @group_non_uniform_logical_or(%val: i1) -> i1 {
 
 func.func @group_non_uniform_logical_or(%val: i32) -> i32 {
   // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
-  %0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %val : i32
+  %0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
 
@@ -592,8 +592,8 @@ func.func @group_non_uniform_logical_or(%val: i32) -> i32 {
 
 // CHECK-LABEL: @group_non_uniform_logical_xor
 func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %{{.+}} : i1
-  %0 = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %val : i1
+  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %{{.+}} : i1 -> i1
+  %0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }
 
@@ -601,6 +601,6 @@ func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {
 
 func.func @group_non_uniform_logical_xor(%val: i32) -> i32 {
   // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
-  %0 = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %val : i32
+  %0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
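
For reference, the updated declarative syntax for the group non-uniform arithmetic ops, distilled from the tests above (a minimal illustrative sketch, not part of the committed diff; the value names and types are arbitrary):

  // Plain reduction: execution scope and group operation now use angle
  // brackets, and both the operand and result types are spelled out.
  %r = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %a : i32 -> i32

  // Clustered reduction: the cluster size must come from a constant op and
  // be a power of two; its type follows the operand type in the signature.
  %four = spirv.Constant 4 : i32
  %c = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %v cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>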

diff --git a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
index 931034f3d5f6ea..ff5ac7cea8fc6d 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
@@ -101,7 +101,7 @@ spirv.module Logical GLSL450 attributes {
     #spirv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spirv.resource_limits<>>
 } {
   spirv.func @group_non_uniform_iadd(%val : i32) -> i32 "None" {
-    %0 = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %val : i32
+    %0 = spirv.GroupNonUniformIAdd <Subgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 }
@@ -112,7 +112,7 @@ spirv.module Logical GLSL450 attributes {
     #spirv.vce<v1.3, [Shader, GroupNonUniformClustered, GroupNonUniformBallot], []>, #spirv.resource_limits<>>
 } {
   spirv.func @group_non_uniform_iadd(%val : i32) -> i32 "None" {
-    %0 = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %val : i32
+    %0 = spirv.GroupNonUniformIAdd <Subgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 }

diff --git a/mlir/test/Target/SPIRV/debug.mlir b/mlir/test/Target/SPIRV/debug.mlir
index 50c83d876bef38..d1cd71d65ca8d9 100644
--- a/mlir/test/Target/SPIRV/debug.mlir
+++ b/mlir/test/Target/SPIRV/debug.mlir
@@ -39,7 +39,7 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], []> {
 
   spirv.func @group_non_uniform(%val: f32) "None" {
     // CHECK: loc({{".*debug.mlir"}}:42:10)
-    %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %val : f32
+    %0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %val : f32 -> f32
     spirv.Return
   }
 

diff --git a/mlir/test/Target/SPIRV/group-ops.mlir b/mlir/test/Target/SPIRV/group-ops.mlir
index dc07f8c8ef61fd..32da4d9c26bd17 100644
--- a/mlir/test/Target/SPIRV/group-ops.mlir
+++ b/mlir/test/Target/SPIRV/group-ops.mlir
@@ -21,14 +21,14 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], []> {
   }
   // CHECK-LABEL: @subgroup_block_read_intel
   spirv.func @subgroup_block_read_intel(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 "None" {
-    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : i32
-    %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : i32
+    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> i32
+    %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> i32
     spirv.ReturnValue %0: i32
   }
   // CHECK-LABEL: @subgroup_block_read_intel_vector
   spirv.func @subgroup_block_read_intel_vector(%ptr : !spirv.ptr<i32, StorageBuffer>) -> vector<3xi32> "None" {
-    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : vector<3xi32>
-    %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : vector<3xi32>
+    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
+    %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
     spirv.ReturnValue %0: vector<3xi32>
   }
   // CHECK-LABEL: @subgroup_block_write_intel
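
Similarly, spirv.INTEL.SubgroupBlockRead no longer takes the storage class as a quoted string; it is now implied by the explicit pointer type (an illustrative sketch based on the updated tests above, not part of the committed diff):

  // Before: %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : i32
  // After: the pointer type carries the storage class, and the result type
  // follows the arrow.
  %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> i32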

diff --git a/mlir/test/Target/SPIRV/non-uniform-ops.mlir b/mlir/test/Target/SPIRV/non-uniform-ops.mlir
index 4a08de2e257906..3e78eaf8b03ef9 100644
--- a/mlir/test/Target/SPIRV/non-uniform-ops.mlir
+++ b/mlir/test/Target/SPIRV/non-uniform-ops.mlir
@@ -25,79 +25,79 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], []> {
 
   // CHECK-LABEL: @group_non_uniform_fadd_reduce
   spirv.func @group_non_uniform_fadd_reduce(%val: f32) -> f32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %{{.+}} : f32
-    %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %val : f32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+    %0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %val : f32 -> f32
     spirv.ReturnValue %0: f32
   }
 
   // CHECK-LABEL: @group_non_uniform_fmax_reduce
   spirv.func @group_non_uniform_fmax_reduce(%val: f32) -> f32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %{{.+}} : f32
-    %0 = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %val : f32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+    %0 = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %val : f32 -> f32
     spirv.ReturnValue %0: f32
   }
 
   // CHECK-LABEL: @group_non_uniform_fmin_reduce
   spirv.func @group_non_uniform_fmin_reduce(%val: f32) -> f32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %{{.+}} : f32
-    %0 = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %val : f32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+    %0 = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %val : f32 -> f32
     spirv.ReturnValue %0: f32
   }
 
   // CHECK-LABEL: @group_non_uniform_fmul_reduce
   spirv.func @group_non_uniform_fmul_reduce(%val: f32) -> f32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %{{.+}} : f32
-    %0 = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %val : f32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+    %0 = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %val : f32 -> f32
     spirv.ReturnValue %0: f32
   }
 
   // CHECK-LABEL: @group_non_uniform_iadd_reduce
   spirv.func @group_non_uniform_iadd_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 
   // CHECK-LABEL: @group_non_uniform_iadd_clustered_reduce
   spirv.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> "None" {
     %four = spirv.Constant 4 : i32
-    // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>
-    %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xi32>
+    // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>, i32 -> vector<2xi32>
+    %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>
     spirv.ReturnValue %0: vector<2xi32>
   }
 
   // CHECK-LABEL: @group_non_uniform_imul_reduce
   spirv.func @group_non_uniform_imul_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 
   // CHECK-LABEL: @group_non_uniform_smax_reduce
   spirv.func @group_non_uniform_smax_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 
   // CHECK-LABEL: @group_non_uniform_smin_reduce
   spirv.func @group_non_uniform_smin_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 
   // CHECK-LABEL: @group_non_uniform_umax_reduce
   spirv.func @group_non_uniform_umax_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 
   // CHECK-LABEL: @group_non_uniform_umin_reduce
   spirv.func @group_non_uniform_umin_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 