[Mlir-commits] [mlir] 5a53add - [mlir] Add optimization attrs for gpu-to-llvmspv function declarations and calls (#99301)
llvmlistbot at llvm.org
Wed Jul 24 09:30:07 PDT 2024
Author: Finlay
Date: 2024-07-24T18:30:03+02:00
New Revision: 5a53add85a6d5be2d15eea32a3d06fec35e2c699
URL: https://github.com/llvm/llvm-project/commit/5a53add85a6d5be2d15eea32a3d06fec35e2c699
DIFF: https://github.com/llvm/llvm-project/commit/5a53add85a6d5be2d15eea32a3d06fec35e2c699.diff
LOG: [mlir] Add optimization attrs for gpu-to-llvmspv function declarations and calls (#99301)
Adds the `nounwind` and `willreturn` attributes to all function
declarations, and the `memory(none)` equivalent to the id/dimension
function declarations. The function declaration attributes are also
copied to the corresponding function calls.
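For illustration, here is a minimal sketch of the expected output for one
of the id builtins, based on the updated test expectations below (`%c0` is
a placeholder SSA name and the attribute order may differ):

  llvm.func spir_funccc @_Z12get_local_idj(i32) -> i64 attributes {
      memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>,
      no_unwind, will_return}

  // The call site mirrors the declaration's attributes:
  llvm.call spir_funccc @_Z12get_local_idj(%c0) {
      memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>,
      no_unwind, will_return} : (i32) -> i64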
`nounwind` is legal because there are no exceptions in SPIR-V. I also do
not see any reason why any of these functions would not return when used
correctly.
I am confident that the get id/dim functions have no externally
observable memory effects, but I think the convergent functions may have
effects, so they are not marked `memory(none)`.
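As a concrete sketch (matching the barrier test below), a convergent
builtin keeps `convergent` and gains `no_unwind` and `will_return`, but
deliberately carries no `memory_effects` attribute:

  llvm.func spir_funccc @_Z7barrierj(i32) attributes {
      convergent, no_unwind, will_return}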
Added:
Modified:
mlir/lib/Conversion/GPUToLLVMSPV/GPUToLLVMSPV.cpp
mlir/test/Conversion/GPUToLLVMSPV/gpu-to-llvm-spv.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Conversion/GPUToLLVMSPV/GPUToLLVMSPV.cpp b/mlir/lib/Conversion/GPUToLLVMSPV/GPUToLLVMSPV.cpp
index ebeb8f803d71d..27d63b5f8948d 100644
--- a/mlir/lib/Conversion/GPUToLLVMSPV/GPUToLLVMSPV.cpp
+++ b/mlir/lib/Conversion/GPUToLLVMSPV/GPUToLLVMSPV.cpp
@@ -43,8 +43,8 @@ namespace mlir {
static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
StringRef name,
ArrayRef<Type> paramTypes,
- Type resultType,
- bool isConvergent = false) {
+ Type resultType, bool isMemNone,
+ bool isConvergent) {
auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>(
SymbolTable::lookupSymbolIn(symbolTable, name));
if (!func) {
@@ -53,6 +53,18 @@ static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
symbolTable->getLoc(), name,
LLVM::LLVMFunctionType::get(resultType, paramTypes));
func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
+ func.setNoUnwind(true);
+ func.setWillReturn(true);
+
+ if (isMemNone) {
+      // No externally observable effects.
+ constexpr auto noModRef = mlir::LLVM::ModRefInfo::NoModRef;
+ auto memAttr = b.getAttr<LLVM::MemoryEffectsAttr>(
+ /*other=*/noModRef,
+ /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef);
+ func.setMemoryEffectsAttr(memAttr);
+ }
+
func.setConvergent(isConvergent);
}
return func;
@@ -64,6 +76,10 @@ static LLVM::CallOp createSPIRVBuiltinCall(Location loc,
ValueRange args) {
auto call = rewriter.create<LLVM::CallOp>(loc, func, args);
call.setCConv(func.getCConv());
+ call.setConvergentAttr(func.getConvergentAttr());
+ call.setNoUnwindAttr(func.getNoUnwindAttr());
+ call.setWillReturnAttr(func.getWillReturnAttr());
+ call.setMemoryEffectsAttr(func.getMemoryEffectsAttr());
return call;
}
@@ -91,8 +107,9 @@ struct GPUBarrierConversion final : ConvertOpToLLVMPattern<gpu::BarrierOp> {
assert(moduleOp && "Expecting module");
Type flagTy = rewriter.getI32Type();
Type voidTy = rewriter.getType<LLVM::LLVMVoidType>();
- LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
- moduleOp, funcName, flagTy, voidTy, /*isConvergent=*/true);
+ LLVM::LLVMFuncOp func =
+ lookupOrCreateSPIRVFn(moduleOp, funcName, flagTy, voidTy,
+ /*isMemNone=*/false, /*isConvergent=*/true);
// Value used by SPIR-V backend to represent `CLK_LOCAL_MEM_FENCE`.
// See `llvm/lib/Target/SPIRV/SPIRVBuiltins.td`.
@@ -134,8 +151,9 @@ struct LaunchConfigConversion : ConvertToLLVMPattern {
assert(moduleOp && "Expecting module");
Type dimTy = rewriter.getI32Type();
Type indexTy = getTypeConverter()->getIndexType();
- LLVM::LLVMFuncOp func =
- lookupOrCreateSPIRVFn(moduleOp, funcName, dimTy, indexTy);
+ LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(moduleOp, funcName, dimTy,
+ indexTy, /*isMemNone=*/true,
+ /*isConvergent=*/false);
Location loc = op->getLoc();
gpu::Dimension dim = getDimension(op);
@@ -268,9 +286,9 @@ struct GPUShuffleConversion final : ConvertOpToLLVMPattern<gpu::ShuffleOp> {
Type valueType = adaptor.getValue().getType();
Type offsetType = adaptor.getOffset().getType();
Type resultType = valueType;
- LLVM::LLVMFuncOp func =
- lookupOrCreateSPIRVFn(moduleOp, funcName, {valueType, offsetType},
- resultType, /*isConvergent=*/true);
+ LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
+ moduleOp, funcName, {valueType, offsetType}, resultType,
+ /*isMemNone=*/false, /*isConvergent=*/true);
Location loc = op->getLoc();
std::array<Value, 2> args{adaptor.getValue(), adaptor.getOffset()};
diff --git a/mlir/test/Conversion/GPUToLLVMSPV/gpu-to-llvm-spv.mlir b/mlir/test/Conversion/GPUToLLVMSPV/gpu-to-llvm-spv.mlir
index 1b0f89a9a573e..bd7e5d139b001 100644
--- a/mlir/test/Conversion/GPUToLLVMSPV/gpu-to-llvm-spv.mlir
+++ b/mlir/test/Conversion/GPUToLLVMSPV/gpu-to-llvm-spv.mlir
@@ -4,30 +4,70 @@
// RUN: | FileCheck --check-prefixes=CHECK-32,CHECK %s
gpu.module @builtins {
- // CHECK-64: llvm.func spir_funccc @_Z14get_num_groupsj(i32) -> i64
- // CHECK-64: llvm.func spir_funccc @_Z12get_local_idj(i32) -> i64
- // CHECK-64: llvm.func spir_funccc @_Z14get_local_sizej(i32) -> i64
- // CHECK-64: llvm.func spir_funccc @_Z13get_global_idj(i32) -> i64
- // CHECK-64: llvm.func spir_funccc @_Z12get_group_idj(i32) -> i64
- // CHECK-32: llvm.func spir_funccc @_Z14get_num_groupsj(i32) -> i32
- // CHECK-32: llvm.func spir_funccc @_Z12get_local_idj(i32) -> i32
- // CHECK-32: llvm.func spir_funccc @_Z14get_local_sizej(i32) -> i32
- // CHECK-32: llvm.func spir_funccc @_Z13get_global_idj(i32) -> i32
- // CHECK-32: llvm.func spir_funccc @_Z12get_group_idj(i32) -> i32
+ // CHECK-64: llvm.func spir_funccc @_Z14get_num_groupsj(i32) -> i64 attributes {
+ // CHECK-32: llvm.func spir_funccc @_Z14get_num_groupsj(i32) -> i32 attributes {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-SAME: }
+ // CHECK-64: llvm.func spir_funccc @_Z12get_local_idj(i32) -> i64 attributes {
+ // CHECK-32: llvm.func spir_funccc @_Z12get_local_idj(i32) -> i32 attributes {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-SAME: }
+ // CHECK-64: llvm.func spir_funccc @_Z14get_local_sizej(i32) -> i64 attributes {
+ // CHECK-32: llvm.func spir_funccc @_Z14get_local_sizej(i32) -> i32 attributes {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-SAME: }
+ // CHECK-64: llvm.func spir_funccc @_Z13get_global_idj(i32) -> i64 attributes {
+ // CHECK-32: llvm.func spir_funccc @_Z13get_global_idj(i32) -> i32 attributes {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-SAME: }
+ // CHECK-64: llvm.func spir_funccc @_Z12get_group_idj(i32) -> i64 attributes {
+ // CHECK-32: llvm.func spir_funccc @_Z12get_group_idj(i32) -> i32 attributes {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-SAME: }
// CHECK-LABEL: gpu_block_id
func.func @gpu_block_id() -> (index, index, index) {
// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z12get_group_idj([[C0]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z12get_group_idj([[C0]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z12get_group_idj([[C0]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%block_id_x = gpu.block_id x
// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z12get_group_idj([[C1]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z12get_group_idj([[C1]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z12get_group_idj([[C1]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%block_id_y = gpu.block_id y
// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z12get_group_idj([[C2]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z12get_group_idj([[C2]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z12get_group_idj([[C2]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%block_id_z = gpu.block_id z
return %block_id_x, %block_id_y, %block_id_z : index, index, index
}
@@ -35,16 +75,31 @@ gpu.module @builtins {
// CHECK-LABEL: gpu_global_id
func.func @gpu_global_id() -> (index, index, index) {
// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z13get_global_idj([[C0]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z13get_global_idj([[C0]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z13get_global_idj([[C0]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%global_id_x = gpu.global_id x
// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z13get_global_idj([[C1]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z13get_global_idj([[C1]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z13get_global_idj([[C1]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%global_id_y = gpu.global_id y
// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z13get_global_idj([[C2]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z13get_global_idj([[C2]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z13get_global_idj([[C2]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%global_id_z = gpu.global_id z
return %global_id_x, %global_id_y, %global_id_z : index, index, index
}
@@ -52,16 +107,31 @@ gpu.module @builtins {
// CHECK-LABEL: gpu_block_dim
func.func @gpu_block_dim() -> (index, index, index) {
// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z14get_local_sizej([[C0]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z14get_local_sizej([[C0]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z14get_local_sizej([[C0]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%block_dim_x = gpu.block_dim x
// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z14get_local_sizej([[C1]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z14get_local_sizej([[C1]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z14get_local_sizej([[C1]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%block_dim_y = gpu.block_dim y
// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z14get_local_sizej([[C2]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z14get_local_sizej([[C2]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z14get_local_sizej([[C2]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%block_dim_z = gpu.block_dim z
return %block_dim_x, %block_dim_y, %block_dim_z : index, index, index
}
@@ -69,16 +139,31 @@ gpu.module @builtins {
// CHECK-LABEL: gpu_thread_id
func.func @gpu_thread_id() -> (index, index, index) {
// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z12get_local_idj([[C0]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z12get_local_idj([[C0]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z12get_local_idj([[C0]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%thread_id_x = gpu.thread_id x
// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z12get_local_idj([[C1]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z12get_local_idj([[C1]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z12get_local_idj([[C1]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%thread_id_y = gpu.thread_id y
// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z12get_local_idj([[C2]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z12get_local_idj([[C2]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z12get_local_idj([[C2]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%thread_id_z = gpu.thread_id z
return %thread_id_x, %thread_id_y, %thread_id_z : index, index, index
}
@@ -86,16 +171,31 @@ gpu.module @builtins {
// CHECK-LABEL: gpu_grid_dim
func.func @gpu_grid_dim() -> (index, index, index) {
// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z14get_num_groupsj([[C0]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z14get_num_groupsj([[C0]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z14get_num_groupsj([[C0]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%grid_dim_x = gpu.grid_dim x
// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z14get_num_groupsj([[C1]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z14get_num_groupsj([[C1]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z14get_num_groupsj([[C1]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%grid_dim_y = gpu.grid_dim y
// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
- // CHECK-64: llvm.call spir_funccc @_Z14get_num_groupsj([[C2]]) : (i32) -> i64
- // CHECK-32: llvm.call spir_funccc @_Z14get_num_groupsj([[C2]]) : (i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z14get_num_groupsj([[C2]]) {
+ // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: convergent
+ // CHECK-64-SAME: } : (i32) -> i64
+ // CHECK-32-SAME: } : (i32) -> i32
%grid_dim_z = gpu.grid_dim z
return %grid_dim_x, %grid_dim_y, %grid_dim_z : index, index, index
}
@@ -104,12 +204,22 @@ gpu.module @builtins {
// -----
gpu.module @barriers {
- // CHECK: llvm.func spir_funccc @_Z7barrierj(i32) attributes {convergent}
+ // CHECK: llvm.func spir_funccc @_Z7barrierj(i32) attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
// CHECK-LABEL: gpu_barrier
func.func @gpu_barrier() {
// CHECK: [[FLAGS:%.*]] = llvm.mlir.constant(1 : i32) : i32
- // CHECK: llvm.call spir_funccc @_Z7barrierj([[FLAGS]]) : (i32) -> ()
+ // CHECK: llvm.call spir_funccc @_Z7barrierj([[FLAGS]]) {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: } : (i32) -> ()
gpu.barrier
return
}
@@ -120,10 +230,30 @@ gpu.module @barriers {
// Check `gpu.shuffle` conversion with default subgroup size.
gpu.module @shuffles {
- // CHECK: llvm.func spir_funccc @_Z22sub_group_shuffle_downdj(f64, i32) -> f64 attributes {convergent}
- // CHECK: llvm.func spir_funccc @_Z20sub_group_shuffle_upfj(f32, i32) -> f32 attributes {convergent}
- // CHECK: llvm.func spir_funccc @_Z21sub_group_shuffle_xorlj(i64, i32) -> i64 attributes {convergent}
- // CHECK: llvm.func spir_funccc @_Z17sub_group_shuffleij(i32, i32) -> i32 attributes {convergent}
+ // CHECK: llvm.func spir_funccc @_Z22sub_group_shuffle_downdj(f64, i32) -> f64 attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
+ // CHECK: llvm.func spir_funccc @_Z20sub_group_shuffle_upfj(f32, i32) -> f32 attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
+ // CHECK: llvm.func spir_funccc @_Z21sub_group_shuffle_xorlj(i64, i32) -> i64 attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
+ // CHECK: llvm.func spir_funccc @_Z17sub_group_shuffleij(i32, i32) -> i32 attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
// CHECK-LABEL: gpu_shuffles
// CHECK-SAME: (%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32, %[[VAL_2:.*]]: i64, %[[VAL_3:.*]]: i32, %[[VAL_4:.*]]: f32, %[[VAL_5:.*]]: i32, %[[VAL_6:.*]]: f64, %[[VAL_7:.*]]: i32)
@@ -132,13 +262,33 @@ gpu.module @shuffles {
%val2: f32, %delta_up: i32,
%val3: f64, %delta_down: i32) {
%width = arith.constant 32 : i32
- // CHECK: llvm.call spir_funccc @_Z17sub_group_shuffleij(%[[VAL_0]], %[[VAL_1]]) : (i32, i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z17sub_group_shuffleij(%[[VAL_0]], %[[VAL_1]]) {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: } : (i32, i32) -> i32
// CHECK: llvm.mlir.constant(true) : i1
- // CHECK: llvm.call spir_funccc @_Z21sub_group_shuffle_xorlj(%[[VAL_2]], %[[VAL_3]]) : (i64, i32) -> i64
+ // CHECK: llvm.call spir_funccc @_Z21sub_group_shuffle_xorlj(%[[VAL_2]], %[[VAL_3]]) {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: } : (i64, i32) -> i64
// CHECK: llvm.mlir.constant(true) : i1
- // CHECK: llvm.call spir_funccc @_Z20sub_group_shuffle_upfj(%[[VAL_4]], %[[VAL_5]]) : (f32, i32) -> f32
+ // CHECK: llvm.call spir_funccc @_Z20sub_group_shuffle_upfj(%[[VAL_4]], %[[VAL_5]]) {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+  // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: } : (f32, i32) -> f32
// CHECK: llvm.mlir.constant(true) : i1
- // CHECK: llvm.call spir_funccc @_Z22sub_group_shuffle_downdj(%[[VAL_6]], %[[VAL_7]]) : (f64, i32) -> f64
+ // CHECK: llvm.call spir_funccc @_Z22sub_group_shuffle_downdj(%[[VAL_6]], %[[VAL_7]]) {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+  // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: } : (f64, i32) -> f64
// CHECK: llvm.mlir.constant(true) : i1
%shuffleResult0, %valid0 = gpu.shuffle idx %val0, %id, %width : i32
%shuffleResult1, %valid1 = gpu.shuffle xor %val1, %mask, %width : i64
@@ -155,10 +305,30 @@ gpu.module @shuffles {
gpu.module @shuffles attributes {
spirv.target_env = #spirv.target_env<#spirv.vce<v1.4, [Kernel, Addresses, GroupNonUniformShuffle, Int64], []>, #spirv.resource_limits<subgroup_size = 16>>
} {
- // CHECK: llvm.func spir_funccc @_Z22sub_group_shuffle_downdj(f64, i32) -> f64 attributes {convergent}
- // CHECK: llvm.func spir_funccc @_Z20sub_group_shuffle_upfj(f32, i32) -> f32 attributes {convergent}
- // CHECK: llvm.func spir_funccc @_Z21sub_group_shuffle_xorlj(i64, i32) -> i64 attributes {convergent}
- // CHECK: llvm.func spir_funccc @_Z17sub_group_shuffleij(i32, i32) -> i32 attributes {convergent}
+ // CHECK: llvm.func spir_funccc @_Z22sub_group_shuffle_downdj(f64, i32) -> f64 attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
+ // CHECK: llvm.func spir_funccc @_Z20sub_group_shuffle_upfj(f32, i32) -> f32 attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
+ // CHECK: llvm.func spir_funccc @_Z21sub_group_shuffle_xorlj(i64, i32) -> i64 attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
+ // CHECK: llvm.func spir_funccc @_Z17sub_group_shuffleij(i32, i32) -> i32 attributes {
+ // CHECK-SAME-DAG: no_unwind
+ // CHECK-SAME-DAG: convergent
+ // CHECK-SAME-DAG: will_return
+ // CHECK-NOT: memory_effects = #llvm.memory_effects
+ // CHECK-SAME: }
// CHECK-LABEL: gpu_shuffles
// CHECK-SAME: (%[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32, %[[VAL_2:.*]]: i64, %[[VAL_3:.*]]: i32, %[[VAL_4:.*]]: f32, %[[VAL_5:.*]]: i32, %[[VAL_6:.*]]: f64, %[[VAL_7:.*]]: i32)
@@ -167,13 +337,13 @@ gpu.module @shuffles attributes {
%val2: f32, %delta_up: i32,
%val3: f64, %delta_down: i32) {
%width = arith.constant 16 : i32
- // CHECK: llvm.call spir_funccc @_Z17sub_group_shuffleij(%[[VAL_0]], %[[VAL_1]]) : (i32, i32) -> i32
+ // CHECK: llvm.call spir_funccc @_Z17sub_group_shuffleij(%[[VAL_0]], %[[VAL_1]])
// CHECK: llvm.mlir.constant(true) : i1
- // CHECK: llvm.call spir_funccc @_Z21sub_group_shuffle_xorlj(%[[VAL_2]], %[[VAL_3]]) : (i64, i32) -> i64
+ // CHECK: llvm.call spir_funccc @_Z21sub_group_shuffle_xorlj(%[[VAL_2]], %[[VAL_3]])
// CHECK: llvm.mlir.constant(true) : i1
- // CHECK: llvm.call spir_funccc @_Z20sub_group_shuffle_upfj(%[[VAL_4]], %[[VAL_5]]) : (f32, i32) -> f32
+ // CHECK: llvm.call spir_funccc @_Z20sub_group_shuffle_upfj(%[[VAL_4]], %[[VAL_5]])
// CHECK: llvm.mlir.constant(true) : i1
- // CHECK: llvm.call spir_funccc @_Z22sub_group_shuffle_downdj(%[[VAL_6]], %[[VAL_7]]) : (f64, i32) -> f64
+ // CHECK: llvm.call spir_funccc @_Z22sub_group_shuffle_downdj(%[[VAL_6]], %[[VAL_7]])
// CHECK: llvm.mlir.constant(true) : i1
%shuffleResult0, %valid0 = gpu.shuffle idx %val0, %id, %width : i32
%shuffleResult1, %valid1 = gpu.shuffle xor %val1, %mask, %width : i64