[Mlir-commits] [mlir] [mlir][spirv] Add spirv-to-llvm conversion for group operations (PR #115501)

Lukas Sommer llvmlistbot at llvm.org
Fri Nov 8 08:29:09 PST 2024


https://github.com/sommerlukas updated https://github.com/llvm/llvm-project/pull/115501

>From f938531fde790af18ef67590a657073666d8a52e Mon Sep 17 00:00:00 2001
From: Lukas Sommer <lukas.sommer at codeplay.com>
Date: Fri, 8 Nov 2024 15:15:11 +0000
Subject: [PATCH 1/3] [mlir][spirv] Add spirv-to-llvm conversion for group
 operations

Signed-off-by: Lukas Sommer <lukas.sommer at codeplay.com>
---
 .../Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp    | 240 +++++++++++++-
 .../SPIRVToLLVM/group-ops-to-llvm.mlir        | 312 ++++++++++++++++++
 .../SPIRVToLLVM/non-uniform-ops-to-llvm.mlir  | 247 ++++++++++++++
 3 files changed, 794 insertions(+), 5 deletions(-)
 create mode 100644 mlir/test/Conversion/SPIRVToLLVM/group-ops-to-llvm.mlir
 create mode 100644 mlir/test/Conversion/SPIRVToLLVM/non-uniform-ops-to-llvm.mlir

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index ba31936af5510d..3fd78d2b08a6bb 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -22,6 +22,7 @@
 #include "mlir/IR/BuiltinOps.h"
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/Transforms/DialectConversion.h"
+#include "llvm/ADT/TypeSwitch.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/FormatVariadic.h"
 
@@ -1027,7 +1028,8 @@ class ReturnValuePattern : public SPIRVToLLVMConversion<spirv::ReturnValueOp> {
 static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
                                               StringRef name,
                                               ArrayRef<Type> paramTypes,
-                                              Type resultType) {
+                                              Type resultType,
+                                              bool convergent = true) {
   auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>(
       SymbolTable::lookupSymbolIn(symbolTable, name));
   if (func)
@@ -1038,7 +1040,9 @@ static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
       symbolTable->getLoc(), name,
       LLVM::LLVMFunctionType::get(resultType, paramTypes));
   func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
-  func.setConvergent(true);
+  if (convergent) {
+    func.setConvergent(true);
+  }
   func.setNoUnwind(true);
   func.setWillReturn(true);
   return func;
@@ -1046,10 +1050,13 @@ static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
 
 static LLVM::CallOp createSPIRVBuiltinCall(Location loc, OpBuilder &builder,
                                            LLVM::LLVMFuncOp func,
-                                           ValueRange args) {
+                                           ValueRange args,
+                                           bool convergent = true) {
   auto call = builder.create<LLVM::CallOp>(loc, func, args);
   call.setCConv(func.getCConv());
-  call.setConvergentAttr(func.getConvergentAttr());
+  if (convergent) {
+    call.setConvergentAttr(func.getConvergentAttr());
+  }
   call.setNoUnwindAttr(func.getNoUnwindAttr());
   call.setWillReturnAttr(func.getWillReturnAttr());
   return call;
@@ -1089,6 +1096,186 @@ class ControlBarrierPattern
   }
 };
 
+namespace {
+
+StringRef getTypeMangling(Type type, bool isSigned) {
+  return llvm::TypeSwitch<Type, StringRef>(type)
+      .Case<Float16Type>([](auto) { return "Dh"; })
+      .Case<Float32Type>([](auto) { return "f"; })
+      .Case<Float64Type>([](auto) { return "d"; })
+      .Case<IntegerType>([isSigned](IntegerType intTy) {
+        switch (intTy.getWidth()) {
+        case 1:
+          return "b";
+        case 8:
+          return (isSigned) ? "a" : "c";
+        case 16:
+          return (isSigned) ? "s" : "t";
+        case 32:
+          return (isSigned) ? "i" : "j";
+        case 64:
+          return (isSigned) ? "l" : "m";
+        default: {
+          llvm_unreachable("Unsupported integer width");
+          return "";
+        }
+        }
+      })
+      .Default([](auto) {
+        llvm_unreachable("No mangling defined");
+        return "";
+      });
+}
+
+template <typename ReduceOp>
+constexpr StringLiteral getGroupFuncName() {
+  assert(false && "No builtin defined");
+  return "";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupIAddOp>() {
+  return "_Z17__spirv_GroupIAddii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupFAddOp>() {
+  return "_Z17__spirv_GroupFAddii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupSMinOp>() {
+  return "_Z17__spirv_GroupSMinii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupUMinOp>() {
+  return "_Z17__spirv_GroupUMinii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupFMinOp>() {
+  return "_Z17__spirv_GroupFMinii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupSMaxOp>() {
+  return "_Z17__spirv_GroupSMaxii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupUMaxOp>() {
+  return "_Z17__spirv_GroupUMaxii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupFMaxOp>() {
+  return "_Z17__spirv_GroupFMaxii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformIAddOp>() {
+  return "_Z27__spirv_GroupNonUniformIAddii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformFAddOp>() {
+  return "_Z27__spirv_GroupNonUniformFAddii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformIMulOp>() {
+  return "_Z27__spirv_GroupNonUniformIMulii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformFMulOp>() {
+  return "_Z27__spirv_GroupNonUniformFMulii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformSMinOp>() {
+  return "_Z27__spirv_GroupNonUniformSMinii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformUMinOp>() {
+  return "_Z27__spirv_GroupNonUniformUMinii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformFMinOp>() {
+  return "_Z27__spirv_GroupNonUniformFMinii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformSMaxOp>() {
+  return "_Z27__spirv_GroupNonUniformSMaxii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformUMaxOp>() {
+  return "_Z27__spirv_GroupNonUniformUMaxii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformFMaxOp>() {
+  return "_Z27__spirv_GroupNonUniformFMaxii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformBitwiseAndOp>() {
+  return "_Z33__spirv_GroupNonUniformBitwiseAndii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformBitwiseOrOp>() {
+  return "_Z32__spirv_GroupNonUniformBitwiseOrii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformBitwiseXorOp>() {
+  return "_Z33__spirv_GroupNonUniformBitwiseXorii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformLogicalAndOp>() {
+  return "_Z33__spirv_GroupNonUniformLogicalAndii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformLogicalOrOp>() {
+  return "_Z32__spirv_GroupNonUniformLogicalOrii";
+}
+template <>
+constexpr StringLiteral getGroupFuncName<spirv::GroupNonUniformLogicalXorOp>() {
+  return "_Z33__spirv_GroupNonUniformLogicalXorii";
+}
+} // namespace
+
+template <typename ReduceOp, bool Signed = false, bool NonUniform = false>
+class GroupReducePattern : public SPIRVToLLVMConversion<ReduceOp> {
+public:
+  using SPIRVToLLVMConversion<ReduceOp>::SPIRVToLLVMConversion;
+
+  LogicalResult
+  matchAndRewrite(ReduceOp op, typename ReduceOp::Adaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+
+    Type retTy = op.getResult().getType();
+    if (!retTy.isIntOrFloat()) {
+      return failure();
+    }
+    SmallString<20> funcName = getGroupFuncName<ReduceOp>();
+    funcName += getTypeMangling(retTy, /*isSigned=*/false);
+
+    Type i32Ty = rewriter.getI32Type();
+    SmallVector<Type> paramTypes{i32Ty, i32Ty, retTy};
+    if constexpr (NonUniform) {
+      if (adaptor.getClusterSize()) {
+        funcName += "j";
+        paramTypes.push_back(i32Ty);
+      }
+    }
+
+    Operation *symbolTable =
+        op->template getParentWithTrait<OpTrait::SymbolTable>();
+
+    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
+        symbolTable, funcName, paramTypes, retTy, /*convergent=*/!NonUniform);
+
+    Location loc = op.getLoc();
+    Value scope = rewriter.create<LLVM::ConstantOp>(
+        loc, i32Ty, static_cast<int32_t>(adaptor.getExecutionScope()));
+    Value groupOp = rewriter.create<LLVM::ConstantOp>(
+        loc, i32Ty, static_cast<int32_t>(adaptor.getGroupOperation()));
+    SmallVector<Value> operands{scope, groupOp};
+    operands.append(adaptor.getOperands().begin(), adaptor.getOperands().end());
+
+    auto call = createSPIRVBuiltinCall(loc, rewriter, func, operands,
+                                       /*convergent=*/!NonUniform);
+    rewriter.replaceOp(op, call);
+    return success();
+  }
+};
+
 /// Converts `spirv.mlir.loop` to LLVM dialect. All blocks within selection
 /// should be reachable for conversion to succeed. The structure of the loop in
 /// LLVM dialect will be the following:
@@ -1722,7 +1909,50 @@ void mlir::populateSPIRVToLLVMConversionPatterns(
       ReturnPattern, ReturnValuePattern,
 
       // Barrier ops
-      ControlBarrierPattern>(patterns.getContext(), typeConverter);
+      ControlBarrierPattern,
+
+      // Group reduction operations
+      GroupReducePattern<spirv::GroupIAddOp>,
+      GroupReducePattern<spirv::GroupFAddOp>,
+      GroupReducePattern<spirv::GroupFMinOp>,
+      GroupReducePattern<spirv::GroupUMinOp>,
+      GroupReducePattern<spirv::GroupSMinOp, /*Signed*/ true>,
+      GroupReducePattern<spirv::GroupFMaxOp>,
+      GroupReducePattern<spirv::GroupUMaxOp>,
+      GroupReducePattern<spirv::GroupSMaxOp, /*Signed*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformIAddOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformFAddOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformIMulOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformFMulOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformSMinOp, /*Signed*/ true,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformUMinOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformFMinOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformSMaxOp, /*Signed*/ true,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformUMaxOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformFMaxOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformBitwiseAndOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformBitwiseOrOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformBitwiseXorOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformLogicalAndOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformLogicalOrOp, /*Signed*/ false,
+                         /*NonUniform*/ true>,
+      GroupReducePattern<spirv::GroupNonUniformLogicalXorOp, /*Signed*/ false,
+                         /*NonUniform*/ true>
+      >(patterns.getContext(), typeConverter);
 
   patterns.add<GlobalVariablePattern>(clientAPI, patterns.getContext(),
                                       typeConverter);
diff --git a/mlir/test/Conversion/SPIRVToLLVM/group-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/group-ops-to-llvm.mlir
new file mode 100644
index 00000000000000..8c8fc50349e795
--- /dev/null
+++ b/mlir/test/Conversion/SPIRVToLLVM/group-ops-to-llvm.mlir
@@ -0,0 +1,312 @@
+// RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
+
+// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+// CHECK-LABEL:   llvm.func spir_funccc @_Z17__spirv_GroupSMaxiij(i32, i32, i32) -> i32 attributes {convergent, no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z17__spirv_GroupUMaxiij(i32, i32, i32) -> i32 attributes {convergent, no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z17__spirv_GroupFMaxiif(i32, i32, f32) -> f32 attributes {convergent, no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z17__spirv_GroupSMiniij(i32, i32, i32) -> i32 attributes {convergent, no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z17__spirv_GroupUMiniij(i32, i32, i32) -> i32 attributes {convergent, no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z17__spirv_GroupFMiniif(i32, i32, f32) -> f32 attributes {convergent, no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z17__spirv_GroupFAddiif(i32, i32, f32) -> f32 attributes {convergent, no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z17__spirv_GroupIAddiij(i32, i32, i32) -> i32 attributes {convergent, no_unwind, will_return}
+
+// CHECK-LABEL:   llvm.func @group_reduce_iadd(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupIAddiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_reduce_iadd(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupIAdd <Workgroup> <Reduce> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_reduce_fadd(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFAddiif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_reduce_fadd(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFAdd <Workgroup> <Reduce> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_reduce_fmin(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFMiniif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_reduce_fmin(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFMin <Workgroup> <Reduce> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_reduce_umin(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupUMiniij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_reduce_umin(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupUMin <Workgroup> <Reduce> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_reduce_smin(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupSMiniij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_reduce_smin(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupSMin <Workgroup> <Reduce> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_reduce_fmax(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFMaxiif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_reduce_fmax(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFMax <Workgroup> <Reduce> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_reduce_umax(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupUMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_reduce_umax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupUMax <Workgroup> <Reduce> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_reduce_smax(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupSMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_reduce_smax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupSMax <Workgroup> <Reduce> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_inclusive_scan_iadd(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupIAddiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_inclusive_scan_iadd(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupIAdd <Workgroup> <InclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_inclusive_scan_fadd(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFAddiif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_inclusive_scan_fadd(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFAdd <Workgroup> <InclusiveScan> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_inclusive_scan_fmin(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFMiniif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_inclusive_scan_fmin(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFMin <Workgroup> <InclusiveScan> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_inclusive_scan_umin(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupUMiniij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_inclusive_scan_umin(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupUMin <Workgroup> <InclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_inclusive_scan_smin(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupSMiniij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_inclusive_scan_smin(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupSMin <Workgroup> <InclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_inclusive_scan_fmax(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFMaxiif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_inclusive_scan_fmax(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFMax <Workgroup> <InclusiveScan> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_inclusive_scan_umax(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupUMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_inclusive_scan_umax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupUMax <Workgroup> <InclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_inclusive_scan_smax(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupSMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_inclusive_scan_smax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupSMax <Workgroup> <InclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_exclusive_scan_iadd(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupIAddiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_exclusive_scan_iadd(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupIAdd <Workgroup> <ExclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_exclusive_scan_fadd(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFAddiif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_exclusive_scan_fadd(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFAdd <Workgroup> <ExclusiveScan> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_exclusive_scan_fmin(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFMiniif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_exclusive_scan_fmin(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFMin <Workgroup> <ExclusiveScan> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_exclusive_scan_umin(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupUMiniij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_exclusive_scan_umin(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupUMin <Workgroup> <ExclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_exclusive_scan_smin(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupSMiniij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_exclusive_scan_smin(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupSMin <Workgroup> <ExclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_exclusive_scan_fmax(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupFMaxiif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @group_exclusive_scan_fmax(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupFMax <Workgroup> <ExclusiveScan> %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @group_exclusive_scan_umax(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupUMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_exclusive_scan_umax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupUMax <Workgroup> <ExclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @group_exclusive_scan_smax(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupSMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @group_exclusive_scan_smax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupSMax <Workgroup> <ExclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @subgroup_exclusive_scan_smax(
+// CHECK-SAME:                                            %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(2 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z17__spirv_GroupSMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @subgroup_exclusive_scan_smax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupSMax <Subgroup> <ExclusiveScan> %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/non-uniform-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/non-uniform-ops-to-llvm.mlir
new file mode 100644
index 00000000000000..ab937b2c7c620e
--- /dev/null
+++ b/mlir/test/Conversion/SPIRVToLLVM/non-uniform-ops-to-llvm.mlir
@@ -0,0 +1,247 @@
+// RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
+
+// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
+
+// CHECK-LABEL:   llvm.func spir_funccc @_Z33__spirv_GroupNonUniformLogicalXoriib(i32, i32, i1) -> i1 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z32__spirv_GroupNonUniformLogicalOriib(i32, i32, i1) -> i1 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z33__spirv_GroupNonUniformLogicalAndiib(i32, i32, i1) -> i1 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z33__spirv_GroupNonUniformBitwiseXoriij(i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z32__spirv_GroupNonUniformBitwiseOriij(i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z33__spirv_GroupNonUniformBitwiseAndiij(i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformSMaxiijj(i32, i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformFMaxiif(i32, i32, f32) -> f32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformUMaxiij(i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformSMaxiij(i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformFMiniifj(i32, i32, f32, i32) -> f32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformFMiniif(i32, i32, f32) -> f32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformUMiniij(i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformSMiniij(i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformFMuliif(i32, i32, f32) -> f32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformIMuliijj(i32, i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformFAddiifj(i32, i32, f32, i32) -> f32 attributes {no_unwind, will_return}
+// CHECK:         llvm.func spir_funccc @_Z27__spirv_GroupNonUniformIAddiij(i32, i32, i32) -> i32 attributes {no_unwind, will_return}
+
+// CHECK-LABEL:   llvm.func @non_uniform_iadd(
+// CHECK-SAME:                                %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformIAddiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @non_uniform_iadd(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_fadd(
+// CHECK-SAME:                                %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(16 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_4:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformFAddiifj(%[[VAL_2]], %[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) {no_unwind, will_return} : (i32, i32, f32, i32) -> f32
+// CHECK:           llvm.return %[[VAL_4]] : f32
+// CHECK:         }
+spirv.func @non_uniform_fadd(%arg0: f32) -> f32 "None" {
+  %0 = spirv.Constant 16 : i32
+  %1 = spirv.GroupNonUniformFAdd "Subgroup" "ClusteredReduce" %arg0 cluster_size(%0) : f32
+  spirv.ReturnValue %1 : f32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_imul(
+// CHECK-SAME:                                %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(16 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_4:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformIMuliijj(%[[VAL_2]], %[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) {no_unwind, will_return} : (i32, i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_4]] : i32
+// CHECK:         }
+spirv.func @non_uniform_imul(%arg0: i32) -> i32 "None" {
+  %0 = spirv.Constant 16 : i32
+  %1 = spirv.GroupNonUniformIMul "Subgroup" "ClusteredReduce" %arg0 cluster_size(%0) : i32
+  spirv.ReturnValue %1 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_fmul(
+// CHECK-SAME:                                %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformFMuliif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @non_uniform_fmul(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupNonUniformFMul "Subgroup" "Reduce" %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_smin(
+// CHECK-SAME:                                %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformSMiniij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @non_uniform_smin(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupNonUniformSMin "Subgroup" "Reduce" %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_umin(
+// CHECK-SAME:                                %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformUMiniij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @non_uniform_umin(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupNonUniformUMin "Subgroup" "Reduce" %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_fmin(
+// CHECK-SAME:                                %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformFMiniif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @non_uniform_fmin(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupNonUniformFMin "Subgroup" "Reduce" %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_fmin_cluster(
+// CHECK-SAME:                                        %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(16 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_4:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformFMiniifj(%[[VAL_2]], %[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) {no_unwind, will_return} : (i32, i32, f32, i32) -> f32
+// CHECK:           llvm.return %[[VAL_4]] : f32
+// CHECK:         }
+spirv.func @non_uniform_fmin_cluster(%arg0: f32) -> f32 "None" {
+  %0 = spirv.Constant 16 : i32
+  %1 = spirv.GroupNonUniformFMin "Subgroup" "ClusteredReduce" %arg0 cluster_size(%0) : f32
+  spirv.ReturnValue %1 : f32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_smax(
+// CHECK-SAME:                                %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformSMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @non_uniform_smax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupNonUniformSMax "Subgroup" "Reduce" %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_umax(
+// CHECK-SAME:                                %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformUMaxiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @non_uniform_umax(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupNonUniformUMax "Subgroup" "Reduce" %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_fmax(
+// CHECK-SAME:                                %[[VAL_0:.*]]: f32) -> f32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformFMaxiif(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, f32) -> f32
+// CHECK:           llvm.return %[[VAL_3]] : f32
+// CHECK:         }
+spirv.func @non_uniform_fmax(%arg0: f32) -> f32 "None" {
+  %0 = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %arg0 : f32
+  spirv.ReturnValue %0 : f32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_smax_cluster(
+// CHECK-SAME:                                        %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(16 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_4:.*]] = llvm.call spir_funccc @_Z27__spirv_GroupNonUniformSMaxiijj(%[[VAL_2]], %[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) {no_unwind, will_return} : (i32, i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_4]] : i32
+// CHECK:         }
+spirv.func @non_uniform_smax_cluster(%arg0: i32) -> i32 "None" {
+  %0 = spirv.Constant 16 : i32
+  %1 = spirv.GroupNonUniformSMax "Subgroup" "ClusteredReduce" %arg0 cluster_size(%0) : i32
+  spirv.ReturnValue %1 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_bitwise_and(
+// CHECK-SAME:                                       %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z33__spirv_GroupNonUniformBitwiseAndiij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @non_uniform_bitwise_and(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupNonUniformBitwiseAnd "Subgroup" "Reduce" %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_bitwise_or(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z32__spirv_GroupNonUniformBitwiseOriij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @non_uniform_bitwise_or(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupNonUniformBitwiseOr "Subgroup" "Reduce" %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_bitwise_xor(
+// CHECK-SAME:                                       %[[VAL_0:.*]]: i32) -> i32 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z33__spirv_GroupNonUniformBitwiseXoriij(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i32) -> i32
+// CHECK:           llvm.return %[[VAL_3]] : i32
+// CHECK:         }
+spirv.func @non_uniform_bitwise_xor(%arg0: i32) -> i32 "None" {
+  %0 = spirv.GroupNonUniformBitwiseXor "Subgroup" "Reduce" %arg0 : i32
+  spirv.ReturnValue %0 : i32
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_logical_and(
+// CHECK-SAME:                                       %[[VAL_0:.*]]: i1) -> i1 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z33__spirv_GroupNonUniformLogicalAndiib(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i1) -> i1
+// CHECK:           llvm.return %[[VAL_3]] : i1
+// CHECK:         }
+spirv.func @non_uniform_logical_and(%arg0: i1) -> i1 "None" {
+  %0 = spirv.GroupNonUniformLogicalAnd "Subgroup" "Reduce" %arg0 : i1
+  spirv.ReturnValue %0 : i1
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_logical_or(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: i1) -> i1 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z32__spirv_GroupNonUniformLogicalOriib(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i1) -> i1
+// CHECK:           llvm.return %[[VAL_3]] : i1
+// CHECK:         }
+spirv.func @non_uniform_logical_or(%arg0: i1) -> i1 "None" {
+  %0 = spirv.GroupNonUniformLogicalOr "Subgroup" "Reduce" %arg0 : i1
+  spirv.ReturnValue %0 : i1
+}
+
+// CHECK-LABEL:   llvm.func @non_uniform_logical_xor(
+// CHECK-SAME:                                       %[[VAL_0:.*]]: i1) -> i1 {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(3 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_3:.*]] = llvm.call spir_funccc @_Z33__spirv_GroupNonUniformLogicalXoriib(%[[VAL_1]], %[[VAL_2]], %[[VAL_0]]) {no_unwind, will_return} : (i32, i32, i1) -> i1
+// CHECK:           llvm.return %[[VAL_3]] : i1
+// CHECK:         }
+spirv.func @non_uniform_logical_xor(%arg0: i1) -> i1 "None" {
+  %0 = spirv.GroupNonUniformLogicalXor "Subgroup" "Reduce" %arg0 : i1
+  spirv.ReturnValue %0 : i1
+}
+

>From 8f9fc62022e87f5d7a66401d72e2c6bb08d89109 Mon Sep 17 00:00:00 2001
From: Lukas Sommer <lukas.sommer at codeplay.com>
Date: Fri, 8 Nov 2024 15:52:02 +0000
Subject: [PATCH 2/3] Code formatting

---
 mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index 3fd78d2b08a6bb..b0047f8d088959 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -1951,8 +1951,8 @@ void mlir::populateSPIRVToLLVMConversionPatterns(
       GroupReducePattern<spirv::GroupNonUniformLogicalOrOp, /*Signed*/ false,
                          /*NonUniform*/ true>,
       GroupReducePattern<spirv::GroupNonUniformLogicalXorOp, /*Signed*/ false,
-                         /*NonUniform*/ true>
-      >(patterns.getContext(), typeConverter);
+                         /*NonUniform*/ true>>(patterns.getContext(),
+                                               typeConverter);
 
   patterns.add<GlobalVariablePattern>(clientAPI, patterns.getContext(),
                                       typeConverter);

>From 2d97df0c1f2920b66d0a46db06cf976bc056f079 Mon Sep 17 00:00:00 2001
From: Lukas Sommer <lukas.sommer at codeplay.com>
Date: Fri, 8 Nov 2024 16:27:49 +0000
Subject: [PATCH 3/3] Address PR feedback

Signed-off-by: Lukas Sommer <lukas.sommer at codeplay.com>
---
 .../Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp    | 104 ++++++++----------
 1 file changed, 47 insertions(+), 57 deletions(-)

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index b0047f8d088959..ef0508e7ef5f0e 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -1040,9 +1040,7 @@ static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
       symbolTable->getLoc(), name,
       LLVM::LLVMFunctionType::get(resultType, paramTypes));
   func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
-  if (convergent) {
-    func.setConvergent(true);
-  }
+  func.setConvergent(convergent);
   func.setNoUnwind(true);
   func.setWillReturn(true);
   return func;
@@ -1050,13 +1048,10 @@ static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
 
 static LLVM::CallOp createSPIRVBuiltinCall(Location loc, OpBuilder &builder,
                                            LLVM::LLVMFuncOp func,
-                                           ValueRange args,
-                                           bool convergent = true) {
+                                           ValueRange args) {
   auto call = builder.create<LLVM::CallOp>(loc, func, args);
   call.setCConv(func.getCConv());
-  if (convergent) {
-    call.setConvergentAttr(func.getConvergentAttr());
-  }
+  call.setConvergentAttr(func.getConvergentAttr());
   call.setNoUnwindAttr(func.getNoUnwindAttr());
   call.setWillReturnAttr(func.getWillReturnAttr());
   return call;
@@ -1101,9 +1096,9 @@ namespace {
 StringRef getTypeMangling(Type type, bool isSigned) {
   return llvm::TypeSwitch<Type, StringRef>(type)
       .Case<Float16Type>([](auto) { return "Dh"; })
-      .template Case<Float32Type>([](auto) { return "f"; })
-      .template Case<Float64Type>([](auto) { return "d"; })
-      .template Case<IntegerType>([isSigned](IntegerType intTy) {
+      .Case<Float32Type>([](auto) { return "f"; })
+      .Case<Float64Type>([](auto) { return "d"; })
+      .Case<IntegerType>([isSigned](IntegerType intTy) {
         switch (intTy.getWidth()) {
         case 1:
           return "b";
@@ -1115,23 +1110,19 @@ StringRef getTypeMangling(Type type, bool isSigned) {
           return (isSigned) ? "i" : "j";
         case 64:
           return (isSigned) ? "l" : "m";
-        default: {
-          assert(false && "Unsupported integer width");
-          return "";
-        }
+        default:
+          llvm_unreachable("Unsupported integer width");
         }
       })
       .Default([](auto) {
-        assert(false && "No mangling defined");
+        llvm_unreachable("No mangling defined");
         return "";
       });
 }
 
 template <typename ReduceOp>
-constexpr StringLiteral getGroupFuncName() {
-  assert(false && "No builtin defined");
-  return "";
-}
+constexpr StringLiteral getGroupFuncName();
+
 template <>
 constexpr StringLiteral getGroupFuncName<spirv::GroupIAddOp>() {
   return "_Z17__spirv_GroupIAddii";
@@ -1243,7 +1234,7 @@ class GroupReducePattern : public SPIRVToLLVMConversion<ReduceOp> {
     if (!retTy.isIntOrFloat()) {
       return failure();
     }
-    SmallString<20> funcName = getGroupFuncName<ReduceOp>();
+    SmallString<36> funcName = getGroupFuncName<ReduceOp>();
     funcName += getTypeMangling(retTy, false);
 
     Type i32Ty = rewriter.getI32Type();
@@ -1269,8 +1260,7 @@ class GroupReducePattern : public SPIRVToLLVMConversion<ReduceOp> {
     SmallVector<Value> operands{scope, groupOp};
     operands.append(adaptor.getOperands().begin(), adaptor.getOperands().end());
 
-    auto call =
-        createSPIRVBuiltinCall(loc, rewriter, func, operands, !NonUniform);
+    auto call = createSPIRVBuiltinCall(loc, rewriter, func, operands);
     rewriter.replaceOp(op, call);
     return success();
   }
@@ -1916,42 +1906,42 @@ void mlir::populateSPIRVToLLVMConversionPatterns(
       GroupReducePattern<spirv::GroupFAddOp>,
       GroupReducePattern<spirv::GroupFMinOp>,
       GroupReducePattern<spirv::GroupUMinOp>,
-      GroupReducePattern<spirv::GroupSMinOp, /*Signed*/ true>,
+      GroupReducePattern<spirv::GroupSMinOp, /*Signed=*/true>,
       GroupReducePattern<spirv::GroupFMaxOp>,
       GroupReducePattern<spirv::GroupUMaxOp>,
-      GroupReducePattern<spirv::GroupSMaxOp, /*Signed*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformIAddOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformFAddOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformIMulOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformFMulOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformSMinOp, /*Signed*/ true,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformUMinOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformFMinOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformSMaxOp, /*Signed*/ true,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformUMaxOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformFMaxOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformBitwiseAndOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformBitwiseOrOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformBitwiseXorOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformLogicalAndOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformLogicalOrOp, /*Signed*/ false,
-                         /*NonUniform*/ true>,
-      GroupReducePattern<spirv::GroupNonUniformLogicalXorOp, /*Signed*/ false,
-                         /*NonUniform*/ true>>(patterns.getContext(),
+      GroupReducePattern<spirv::GroupSMaxOp, /*Signed=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformIAddOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformFAddOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformIMulOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformFMulOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformSMinOp, /*Signed=*/true,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformUMinOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformFMinOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformSMaxOp, /*Signed=*/true,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformUMaxOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformFMaxOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformBitwiseAndOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformBitwiseOrOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformBitwiseXorOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformLogicalAndOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformLogicalOrOp, /*Signed=*/false,
+                         /*NonUniform=*/true>,
+      GroupReducePattern<spirv::GroupNonUniformLogicalXorOp, /*Signed=*/false,
+                         /*NonUniform=*/true>>(patterns.getContext(),
                                                typeConverter);
 
   patterns.add<GlobalVariablePattern>(clientAPI, patterns.getContext(),



More information about the Mlir-commits mailing list