[Mlir-commits] [mlir] 8d420fb - [spirv][nfc] Simplify resource limit with default values
Lei Zhang
llvmlistbot at llvm.org
Thu Sep 3 10:31:04 PDT 2020
Author: Lei Zhang
Date: 2020-09-03T13:29:26-04:00
New Revision: 8d420fb3a02d8ef61e43936c1e63d5556684b282
URL: https://github.com/llvm/llvm-project/commit/8d420fb3a02d8ef61e43936c1e63d5556684b282
DIFF: https://github.com/llvm/llvm-project/commit/8d420fb3a02d8ef61e43936c1e63d5556684b282.diff
LOG: [spirv][nfc] Simplify resource limit with default values
These default values are taken from the Vulkan required limits.
Reviewed By: hanchung
Differential Revision: https://reviews.llvm.org/D87090
Added:
Modified:
mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td
mlir/test/Conversion/GPUToSPIRV/if.mlir
mlir/test/Conversion/GPUToSPIRV/load-store.mlir
mlir/test/Conversion/GPUToSPIRV/loop.mlir
mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir
mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
mlir/test/Conversion/StandardToSPIRV/alloc.mlir
mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir
mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir
mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir
mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir
mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
mlir/test/Dialect/SPIRV/target-and-abi.mlir
mlir/test/Dialect/SPIRV/target-env.mlir
mlir/test/mlir-vulkan-runner/addf.mlir
mlir/test/mlir-vulkan-runner/addi.mlir
mlir/test/mlir-vulkan-runner/addi8.mlir
mlir/test/mlir-vulkan-runner/mulf.mlir
mlir/test/mlir-vulkan-runner/subf.mlir
mlir/test/mlir-vulkan-runner/time.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td b/mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td
index 231ec54f09f4..04fcc8e0b53e 100644
--- a/mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td
+++ b/mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td
@@ -39,12 +39,16 @@ def SPV_CapabilityArrayAttr : TypedArrayAttrBase<
// This attribute specifies the limits for various resources on the target
// architecture.
//
-// See https://renderdoc.org/vkspec_chunked/chap36.html#limits for the complete
-// list of limits and their explanation for the Vulkan API. The following ones
-// are those affecting SPIR-V CodeGen.
+// See https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#limits
+// for the complete list of limits and their explanation for the Vulkan API.
+// The following ones are those affecting SPIR-V CodeGen. Their default values
+// are taken from the Vulkan limit requirements:
+// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#limits-minmax
def SPV_ResourceLimitsAttr : StructAttr<"ResourceLimitsAttr", SPIRV_Dialect, [
- StructFieldAttr<"max_compute_workgroup_invocations", I32Attr>,
- StructFieldAttr<"max_compute_workgroup_size", I32ElementsAttr>
+ StructFieldAttr<"max_compute_workgroup_invocations",
+ DefaultValuedAttr<I32Attr, "128">>,
+ StructFieldAttr<"max_compute_workgroup_size",
+ DefaultValuedAttr<I32ElementsAttr, "{128, 128, 64}">>
]>;
#endif // SPIRV_TARGET_AND_ABI
diff --git a/mlir/test/Conversion/GPUToSPIRV/if.mlir b/mlir/test/Conversion/GPUToSPIRV/if.mlir
index b7e11d74996b..9651946118a6 100644
--- a/mlir/test/Conversion/GPUToSPIRV/if.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/if.mlir
@@ -3,9 +3,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
func @main(%arg0 : memref<10xf32>, %arg1 : i1) {
%c0 = constant 1 : index
diff --git a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
index da57db15bedc..b9ae8bdfeacd 100644
--- a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
@@ -3,9 +3,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
func @load_store(%arg0: memref<12x4xf32>, %arg1: memref<12x4xf32>, %arg2: memref<12x4xf32>) {
%c0 = constant 0 : index
diff --git a/mlir/test/Conversion/GPUToSPIRV/loop.mlir b/mlir/test/Conversion/GPUToSPIRV/loop.mlir
index 2205c60f875f..c181e1956f83 100644
--- a/mlir/test/Conversion/GPUToSPIRV/loop.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/loop.mlir
@@ -3,9 +3,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
func @loop(%arg0 : memref<10xf32>, %arg1 : memref<10xf32>) {
%c0 = constant 1 : index
diff --git a/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir b/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir
index 1b5b4d52d8b8..0e2a45f9bf3c 100644
--- a/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir
@@ -2,10 +2,7 @@
module attributes {
gpu.container_module,
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Kernel, Addresses], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel, Addresses], []>, {}>
} {
gpu.module @kernels {
// CHECK-LABEL: spv.module @{{.*}} Physical64 OpenCL
diff --git a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
index cebd541977ef..d437ab160b92 100644
--- a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
+++ b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
@@ -16,11 +16,7 @@
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>,
- {
- max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>
- }>
+ #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, {}>
} {
// CHECK: spv.globalVariable
@@ -78,11 +74,7 @@ func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>)
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>,
- {
- max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>
- }>
+ #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, {}>
} {
func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) {
// expected-error @+1 {{failed to legalize operation 'linalg.generic'}}
@@ -111,11 +103,7 @@ func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>)
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>,
- {
- max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>
- }>
+ #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, {}>
} {
func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) attributes {
spv.entry_point_abi = {local_size = dense<[32, 1, 1]>: vector<3xi32>}
@@ -146,11 +134,7 @@ func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>)
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>,
- {
- max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>
- }>
+ #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, {}>
} {
func @single_workgroup_reduction(%input: memref<16x8xi32>, %output: memref<16xi32>) attributes {
spv.entry_point_abi = {local_size = dense<[16, 8, 1]>: vector<3xi32>}
diff --git a/mlir/test/Conversion/StandardToSPIRV/alloc.mlir b/mlir/test/Conversion/StandardToSPIRV/alloc.mlir
index 14ce4699a455..ccd8c02e255a 100644
--- a/mlir/test/Conversion/StandardToSPIRV/alloc.mlir
+++ b/mlir/test/Conversion/StandardToSPIRV/alloc.mlir
@@ -6,9 +6,7 @@
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
}
{
func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) {
@@ -34,9 +32,7 @@ module attributes {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
}
{
func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) {
@@ -65,9 +61,7 @@ module attributes {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
}
{
func @two_allocs() {
@@ -88,9 +82,7 @@ module attributes {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
}
{
func @two_allocs_vector() {
@@ -112,9 +104,7 @@ module attributes {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
}
{
func @alloc_dealloc_dynamic_workgroup_mem(%arg0 : index) {
@@ -129,9 +119,7 @@ module attributes {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
}
{
func @alloc_dealloc_mem() {
@@ -146,9 +134,7 @@ module attributes {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
}
{
func @alloc_dealloc_dynamic_workgroup_mem(%arg0 : memref<4x?xf32, 3>) {
@@ -163,9 +149,7 @@ module attributes {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
}
{
func @alloc_dealloc_mem(%arg0 : memref<4x5xf32>) {
diff --git a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
index 1b83af1be755..ce38ba8b3f5e 100644
--- a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
@@ -6,9 +6,7 @@
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, {}>
} {
// Check integer operation conversions.
@@ -146,10 +144,7 @@ func @unsupported_2x2elem_vector(%arg0: vector<2x2xi32>) {
// Check that types are converted to 32-bit when no special capabilities.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: @int_vector23
@@ -177,10 +172,7 @@ func @float_scalar(%arg0: f16, %arg1: f64) {
// Check that types are converted to 32-bit when no special capabilities that
// are not supported.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
func @int_vector4_invalid(%arg0: vector<4xi64>) {
@@ -199,10 +191,7 @@ func @int_vector4_invalid(%arg0: vector<4xi64>) {
//===----------------------------------------------------------------------===//
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: @bitwise_scalar
@@ -348,9 +337,7 @@ func @boolcmpi(%arg0 : i1, %arg1 : i1) {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, {}>
} {
// CHECK-LABEL: @constant
@@ -412,10 +399,7 @@ func @constant_64bit() {
// Check that constants are converted to 32-bit when no special capability.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: @constant_16bit
@@ -498,9 +482,7 @@ func @unsupported_cases() {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, {}>
} {
// CHECK-LABEL: index_cast1
@@ -631,10 +613,7 @@ func @fptosi2(%arg0 : f16) -> i16 {
// Checks that cast types will be adjusted when no special capabilities for
// non-32-bit scalar types.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: @fpext1
@@ -682,9 +661,8 @@ func @sitofp(%arg0 : i64) {
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader, Int8, Int16, Int64, Float16, Float64], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader, Int8, Int16, Int64, Float16, Float64],
+ [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
//===----------------------------------------------------------------------===//
@@ -750,9 +728,7 @@ func @load_store_zero_rank_int(%arg0: memref<i32>, %arg1: memref<i32>) {
// TODO: Test i1 and i64 types.
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
// CHECK-LABEL: @load_i8
@@ -895,9 +871,7 @@ func @store_f32(%arg0: memref<f32>, %value: f32) {
module attributes {
spv.target_env = #spv.target_env<
#spv.vce<v1.0, [Int16, StorageBuffer16BitAccess, Shader],
- [SPV_KHR_storage_buffer_storage_class, SPV_KHR_16bit_storage]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ [SPV_KHR_storage_buffer_storage_class, SPV_KHR_16bit_storage]>, {}>
} {
// CHECK-LABEL: @load_i8
diff --git a/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir
index 5ea44c18c618..66b2ba97bea1 100644
--- a/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir
+++ b/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir
@@ -7,10 +7,7 @@
// Check that non-32-bit integer types are converted to 32-bit types if the
// corresponding capabilities are not available.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: spv.func @integer8
@@ -38,10 +35,7 @@ func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
// Check that non-32-bit integer types are kept untouched if the corresponding
// capabilities are available.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Int8, Int16, Int64], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [Int8, Int16, Int64], []>, {}>
} {
// CHECK-LABEL: spv.func @integer8
@@ -68,10 +62,7 @@ func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
// Check that weird bitwidths are not supported.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-NOT: spv.func @integer4
@@ -92,10 +83,7 @@ func @integer42(%arg0: i42) { return }
// The index type is always converted into i32.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: spv.func @index_type
@@ -113,10 +101,7 @@ func @index_type(%arg0: index) { return }
// Check that non-32-bit float types are converted to 32-bit types if the
// corresponding capabilities are not available.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: spv.func @float16
@@ -134,10 +119,7 @@ func @float64(%arg0: f64) { return }
// Check that non-32-bit float types are kept untouched if the corresponding
// capabilities are available.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Float16, Float64], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float16, Float64], []>, {}>
} {
// CHECK-LABEL: spv.func @float16
@@ -154,10 +136,7 @@ func @float64(%arg0: f64) { return }
// Check that bf16 is not supported.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-NOT: spv.func @bf16_type
@@ -174,10 +153,7 @@ func @bf16_type(%arg0: bf16) { return }
// Check that capabilities for scalar types affects vector types too: no special
// capabilities available means using turning element types to 32-bit.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: spv.func @int_vector
@@ -206,9 +182,7 @@ func @float_vector(
// special capabilities means keep vector types untouched.
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, {}>
} {
// CHECK-LABEL: spv.func @int_vector
@@ -235,10 +209,7 @@ func @float_vector(
// Check that 1- or > 4-element vectors are not supported.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-NOT: spv.func @one_element_vector
@@ -258,9 +229,7 @@ func @large_vector(%arg0: vector<1024xi32>) { return }
// Check memory spaces.
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
// CHECK-LABEL: func @memref_mem_space
@@ -285,10 +254,7 @@ func @memref_mem_space(
// Check that boolean memref is not supported at the moment.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: func @memref_type({{%.*}}: memref<3xi1>)
@@ -304,10 +270,7 @@ func @memref_type(%arg0: memref<3xi1>) {
// requires special capability and extension: convert them to 32-bit if not
// satisfied.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: spv.func @memref_8bit_StorageBuffer
@@ -352,9 +315,7 @@ func @memref_16bit_Output(%arg4: memref<16xf16, 10>) { return }
module attributes {
spv.target_env = #spv.target_env<
#spv.vce<v1.0, [StoragePushConstant8, StoragePushConstant16],
- [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, {}>
} {
// CHECK-LABEL: spv.func @memref_8bit_PushConstant
@@ -379,9 +340,7 @@ func @memref_16bit_PushConstant(
module attributes {
spv.target_env = #spv.target_env<
#spv.vce<v1.0, [StorageBuffer8BitAccess, StorageBuffer16BitAccess],
- [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, {}>
} {
// CHECK-LABEL: spv.func @memref_8bit_StorageBuffer
@@ -406,9 +365,7 @@ func @memref_16bit_StorageBuffer(
module attributes {
spv.target_env = #spv.target_env<
#spv.vce<v1.0, [UniformAndStorageBuffer8BitAccess, StorageUniform16],
- [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, {}>
} {
// CHECK-LABEL: spv.func @memref_8bit_Uniform
@@ -432,9 +389,7 @@ func @memref_16bit_Uniform(
// and extension is available.
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [StorageInputOutput16], [SPV_KHR_16bit_storage]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [StorageInputOutput16], [SPV_KHR_16bit_storage]>, {}>
} {
// CHECK-LABEL: spv.func @memref_16bit_Input
@@ -452,9 +407,7 @@ func @memref_16bit_Output(%arg4: memref<16xi16, 10>) { return }
// Check that memref offset and strides affect the array size.
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [StorageBuffer16BitAccess], [SPV_KHR_16bit_storage]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [StorageBuffer16BitAccess], [SPV_KHR_16bit_storage]>, {}>
} {
// CHECK-LABEL: spv.func @memref_offset_strides
@@ -488,10 +441,7 @@ func @memref_offset_strides(
// Dynamic shapes
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// Check that unranked shapes are not supported.
@@ -512,10 +462,7 @@ func @dynamic_dim_memref(%arg0: memref<8x?xi32>,
// Vector types
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: func @memref_vector
@@ -539,10 +486,7 @@ func @dynamic_dim_memref_vector(%arg0: memref<8x?xvector<4xi32>>,
// Vector types, check that sizes not available in SPIR-V are not transformed.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: func @memref_vector_wrong_size
@@ -562,9 +506,7 @@ func @memref_vector_wrong_size(
// Check that tensor element types are kept untouched with proper capabilities.
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, {}>
} {
// CHECK-LABEL: spv.func @int_tensor_types
@@ -595,10 +537,7 @@ func @float_tensor_types(
// Check that tensor element types are changed to 32-bit without capabilities.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: spv.func @int_tensor_types
@@ -629,10 +568,7 @@ func @float_tensor_types(
// Check that dynamic shapes are not supported.
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
} {
// CHECK-LABEL: func @unranked_tensor
diff --git a/mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir b/mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir
index 54b810f43aec..1de6b71d888d 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir
@@ -1,10 +1,7 @@
// RUN: mlir-opt -spirv-lower-abi-attrs -verify-diagnostics %s -o - | FileCheck %s
module attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Kernel, Addresses], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel, Addresses], []>, {}>
} {
spv.module Physical64 OpenCL {
// CHECK-LABEL: spv.module
diff --git a/mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir b/mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir
index 28c44bf7b936..5b06745eba87 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir
@@ -2,9 +2,7 @@
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
// CHECK-LABEL: spv.module
diff --git a/mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir b/mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir
index 3d37f35b1c46..7d1a174fa367 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir
@@ -2,9 +2,7 @@
module attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
// CHECK-LABEL: spv.module
diff --git a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
index 572db88e5f9e..74484fd7ab6b 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
@@ -10,9 +10,7 @@
// CHECK: requires #spv.vce<v1.0, [Shader], []>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.5, [Shader], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.5, [Shader], []>, {}>
} {
spv.func @iadd(%val : i32) -> i32 "None" {
%0 = spv.IAdd %val, %val: i32
@@ -26,9 +24,7 @@ spv.module Logical GLSL450 attributes {
// CHECK: requires #spv.vce<v1.3, [GroupNonUniformBallot, Shader], []>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.5, [Shader, GroupNonUniformBallot], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.5, [Shader, GroupNonUniformBallot], []>, {}>
} {
spv.func @group_non_uniform_ballot(%predicate : i1) -> vector<4xi32> "None" {
%0 = spv.GroupNonUniformBallot "Workgroup" %predicate : vector<4xi32>
@@ -45,9 +41,7 @@ spv.module Logical GLSL450 attributes {
// CHECK: requires #spv.vce<v1.0, [Shader], []>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader, Float16, Float64, Int16, Int64, VariablePointers], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader, Float16, Float64, Int16, Int64, VariablePointers], []>, {}>
} {
spv.func @iadd(%val : i32) -> i32 "None" {
%0 = spv.IAdd %val, %val: i32
@@ -61,9 +55,7 @@ spv.module Logical GLSL450 attributes {
// CHECK: requires #spv.vce<v1.0, [Shader], []>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [AtomicStorage], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [AtomicStorage], []>, {}>
} {
spv.func @iadd(%val : i32) -> i32 "None" {
%0 = spv.IAdd %val, %val: i32
@@ -84,9 +76,7 @@ spv.module Logical GLSL450 attributes {
// CHECK: requires #spv.vce<v1.3, [GroupNonUniformArithmetic, Shader], []>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, {}>
} {
spv.func @group_non_uniform_iadd(%val : i32) -> i32 "None" {
%0 = spv.GroupNonUniformIAdd "Subgroup" "Reduce" %val : i32
@@ -97,9 +87,7 @@ spv.module Logical GLSL450 attributes {
// CHECK: requires #spv.vce<v1.3, [GroupNonUniformClustered, GroupNonUniformBallot, Shader], []>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, GroupNonUniformClustered, GroupNonUniformBallot], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.3, [Shader, GroupNonUniformClustered, GroupNonUniformBallot], []>, {}>
} {
spv.func @group_non_uniform_iadd(%val : i32) -> i32 "None" {
%0 = spv.GroupNonUniformIAdd "Subgroup" "Reduce" %val : i32
@@ -113,9 +101,7 @@ spv.module Logical GLSL450 attributes {
// CHECK: requires #spv.vce<v1.0, [Int8, Shader], []>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, Int8], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.3, [Shader, Int8], []>, {}>
} {
spv.func @iadd_function(%val : i8) -> i8 "None" {
%0 = spv.IAdd %val, %val : i8
@@ -127,9 +113,7 @@ spv.module Logical GLSL450 attributes {
// CHECK: requires #spv.vce<v1.0, [Float16, Shader], []>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, Float16], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.3, [Shader, Float16], []>, {}>
} {
spv.func @fadd_function(%val : f16) -> f16 "None" {
%0 = spv.FAdd %val, %val : f16
@@ -148,9 +132,7 @@ spv.module Logical GLSL450 attributes {
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
#spv.vce<v1.0, [Shader, SubgroupBallotKHR],
- [SPV_KHR_shader_ballot, SPV_KHR_shader_clock, SPV_KHR_variable_pointers]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ [SPV_KHR_shader_ballot, SPV_KHR_shader_clock, SPV_KHR_variable_pointers]>, {}>
} {
spv.func @subgroup_ballot(%predicate : i1) -> vector<4xi32> "None" {
%0 = spv.SubgroupBallotKHR %predicate: vector<4xi32>
@@ -165,9 +147,7 @@ spv.module Logical GLSL450 attributes {
// CHECK: requires #spv.vce<v1.0, [VulkanMemoryModel], [SPV_KHR_vulkan_memory_model]>
spv.module Logical Vulkan attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.5, [Shader, VulkanMemoryModel], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.5, [Shader, VulkanMemoryModel], []>, {}>
} {
spv.func @iadd(%val : i32) -> i32 "None" {
%0 = spv.IAdd %val, %val: i32
@@ -182,9 +162,7 @@ spv.module Logical Vulkan attributes {
// CHECK: requires #spv.vce<v1.0, [StorageBuffer16BitAccess, Shader, Int16], [SPV_KHR_16bit_storage, SPV_KHR_storage_buffer_storage_class]>
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
- #spv.vce<v1.3, [Shader, StorageBuffer16BitAccess, Int16], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.3, [Shader, StorageBuffer16BitAccess, Int16], []>, {}>
} {
spv.func @iadd_storage_buffer(%ptr : !spv.ptr<i16, StorageBuffer>) -> i16 "None" {
%0 = spv.Load "StorageBuffer" %ptr : i16
@@ -200,8 +178,7 @@ spv.module Logical GLSL450 attributes {
spv.module Logical GLSL450 attributes {
spv.target_env = #spv.target_env<
#spv.vce<v1.5, [Shader, UniformAndStorageBuffer8BitAccess, StorageBuffer16BitAccess, StorageUniform16, Int16, ImageBuffer, StorageImageExtendedFormats], []>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ {}>
} {
spv.globalVariable @data : !spv.ptr<!spv.struct<i8 [0], f16 [2], i64 [4]>, Uniform>
spv.globalVariable @img : !spv.ptr<!spv.image<f32, Buffer, NoDepth, NonArrayed, SingleSampled, SamplerUnknown, Rg32f>, UniformConstant>
diff --git a/mlir/test/Dialect/SPIRV/target-and-abi.mlir b/mlir/test/Dialect/SPIRV/target-and-abi.mlir
index 8d11f4ca0c64..cd338752600a 100644
--- a/mlir/test/Dialect/SPIRV/target-and-abi.mlir
+++ b/mlir/test/Dialect/SPIRV/target-and-abi.mlir
@@ -104,15 +104,6 @@ func @interface_var(
// spv.target_env
//===----------------------------------------------------------------------===//
-func @target_env_missing_limits() attributes {
- spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- // expected-error @+1 {{limits must be a dictionary attribute containing two 32-bit integer attributes 'max_compute_workgroup_invocations' and 'max_compute_workgroup_size'}}
- {max_compute_workgroup_size = dense<[128, 64, 64]> : vector<3xi32>}>
-} { return }
-
-// -----
-
func @target_env_wrong_limits() attributes {
spv.target_env = #spv.target_env<
#spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
diff --git a/mlir/test/Dialect/SPIRV/target-env.mlir b/mlir/test/Dialect/SPIRV/target-env.mlir
index 27c4e8d04092..c0bc02fae089 100644
--- a/mlir/test/Dialect/SPIRV/target-env.mlir
+++ b/mlir/test/Dialect/SPIRV/target-env.mlir
@@ -35,7 +35,7 @@
// CHECK-LABEL: @cmp_exchange_weak_suitable_version_capabilities
func @cmp_exchange_weak_suitable_version_capabilities(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.1, [Kernel, AtomicStorage], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.1, [Kernel, AtomicStorage], []>, {}>
} {
// CHECK: spv.AtomicCompareExchangeWeak "Workgroup" "AcquireRelease|AtomicCounterMemory" "Acquire"
%0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spv.ptr<i32, Workgroup>, i32, i32) -> (i32)
@@ -44,7 +44,7 @@ func @cmp_exchange_weak_suitable_version_capabilities(%ptr: !spv.ptr<i32, Workgr
// CHECK-LABEL: @cmp_exchange_weak_unsupported_version
func @cmp_exchange_weak_unsupported_version(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.4, [Kernel, AtomicStorage], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.4, [Kernel, AtomicStorage], []>, {}>
} {
// CHECK: test.convert_to_atomic_compare_exchange_weak_op
%0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spv.ptr<i32, Workgroup>, i32, i32) -> (i32)
@@ -57,7 +57,7 @@ func @cmp_exchange_weak_unsupported_version(%ptr: !spv.ptr<i32, Workgroup>, %val
// CHECK-LABEL: @group_non_uniform_ballot_suitable_version
func @group_non_uniform_ballot_suitable_version(%predicate: i1) -> vector<4xi32> attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.4, [GroupNonUniformBallot], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.4, [GroupNonUniformBallot], []>, {}>
} {
// CHECK: spv.GroupNonUniformBallot "Workgroup"
%0 = "test.convert_to_group_non_uniform_ballot_op"(%predicate): (i1) -> (vector<4xi32>)
@@ -66,7 +66,7 @@ func @group_non_uniform_ballot_suitable_version(%predicate: i1) -> vector<4xi32>
// CHECK-LABEL: @group_non_uniform_ballot_unsupported_version
func @group_non_uniform_ballot_unsupported_version(%predicate: i1) -> vector<4xi32> attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.1, [GroupNonUniformBallot], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.1, [GroupNonUniformBallot], []>, {}>
} {
// CHECK: test.convert_to_group_non_uniform_ballot_op
%0 = "test.convert_to_group_non_uniform_ballot_op"(%predicate): (i1) -> (vector<4xi32>)
@@ -79,7 +79,7 @@ func @group_non_uniform_ballot_unsupported_version(%predicate: i1) -> vector<4xi
// CHECK-LABEL: @cmp_exchange_weak_missing_capability_kernel
func @cmp_exchange_weak_missing_capability_kernel(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.3, [AtomicStorage], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.3, [AtomicStorage], []>, {}>
} {
// CHECK: test.convert_to_atomic_compare_exchange_weak_op
%0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spv.ptr<i32, Workgroup>, i32, i32) -> (i32)
@@ -88,7 +88,7 @@ func @cmp_exchange_weak_missing_capability_kernel(%ptr: !spv.ptr<i32, Workgroup>
// CHECK-LABEL: @cmp_exchange_weak_missing_capability_atomic_storage
func @cmp_exchange_weak_missing_capability_atomic_storage(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.3, [Kernel], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.3, [Kernel], []>, {}>
} {
// CHECK: test.convert_to_atomic_compare_exchange_weak_op
%0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spv.ptr<i32, Workgroup>, i32, i32) -> (i32)
@@ -97,7 +97,7 @@ func @cmp_exchange_weak_missing_capability_atomic_storage(%ptr: !spv.ptr<i32, Wo
// CHECK-LABEL: @subgroup_ballot_missing_capability
func @subgroup_ballot_missing_capability(%predicate: i1) -> vector<4xi32> attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.4, [], [SPV_KHR_shader_ballot]>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.4, [], [SPV_KHR_shader_ballot]>, {}>
} {
// CHECK: test.convert_to_subgroup_ballot_op
%0 = "test.convert_to_subgroup_ballot_op"(%predicate): (i1) -> (vector<4xi32>)
@@ -106,7 +106,7 @@ func @subgroup_ballot_missing_capability(%predicate: i1) -> vector<4xi32> attrib
// CHECK-LABEL: @bit_reverse_directly_implied_capability
func @bit_reverse_directly_implied_capability(%operand: i32) -> i32 attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.0, [Geometry], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [Geometry], []>, {}>
} {
// CHECK: spv.BitReverse
%0 = "test.convert_to_bit_reverse_op"(%operand): (i32) -> (i32)
@@ -115,7 +115,7 @@ func @bit_reverse_directly_implied_capability(%operand: i32) -> i32 attributes {
// CHECK-LABEL: @bit_reverse_recursively_implied_capability
func @bit_reverse_recursively_implied_capability(%operand: i32) -> i32 attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.0, [GeometryPointSize], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [GeometryPointSize], []>, {}>
} {
// CHECK: spv.BitReverse
%0 = "test.convert_to_bit_reverse_op"(%operand): (i32) -> (i32)
@@ -128,7 +128,7 @@ func @bit_reverse_recursively_implied_capability(%operand: i32) -> i32 attribute
// CHECK-LABEL: @subgroup_ballot_suitable_extension
func @subgroup_ballot_suitable_extension(%predicate: i1) -> vector<4xi32> attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.4, [SubgroupBallotKHR], [SPV_KHR_shader_ballot]>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.4, [SubgroupBallotKHR], [SPV_KHR_shader_ballot]>, {}>
} {
// CHECK: spv.SubgroupBallotKHR
%0 = "test.convert_to_subgroup_ballot_op"(%predicate): (i1) -> (vector<4xi32>)
@@ -137,7 +137,7 @@ func @subgroup_ballot_suitable_extension(%predicate: i1) -> vector<4xi32> attrib
// CHECK-LABEL: @subgroup_ballot_missing_extension
func @subgroup_ballot_missing_extension(%predicate: i1) -> vector<4xi32> attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.4, [SubgroupBallotKHR], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.4, [SubgroupBallotKHR], []>, {}>
} {
// CHECK: test.convert_to_subgroup_ballot_op
%0 = "test.convert_to_subgroup_ballot_op"(%predicate): (i1) -> (vector<4xi32>)
@@ -146,7 +146,7 @@ func @subgroup_ballot_missing_extension(%predicate: i1) -> vector<4xi32> attribu
// CHECK-LABEL: @module_suitable_extension1
func @module_suitable_extension1() attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.0, [VulkanMemoryModel, PhysicalStorageBufferAddresses], [SPV_KHR_vulkan_memory_model, SPV_EXT_physical_storage_buffer]>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [VulkanMemoryModel, PhysicalStorageBufferAddresses], [SPV_KHR_vulkan_memory_model, SPV_EXT_physical_storage_buffer]>, {}>
} {
// CHECK: spv.module PhysicalStorageBuffer64 Vulkan
"test.convert_to_module_op"() : () ->()
@@ -155,7 +155,7 @@ func @module_suitable_extension1() attributes {
// CHECK-LABEL: @module_suitable_extension2
func @module_suitable_extension2() attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.0, [VulkanMemoryModel, PhysicalStorageBufferAddresses], [SPV_KHR_vulkan_memory_model, SPV_KHR_physical_storage_buffer]>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [VulkanMemoryModel, PhysicalStorageBufferAddresses], [SPV_KHR_vulkan_memory_model, SPV_KHR_physical_storage_buffer]>, {}>
} {
// CHECK: spv.module PhysicalStorageBuffer64 Vulkan
"test.convert_to_module_op"() : () -> ()
@@ -164,7 +164,7 @@ func @module_suitable_extension2() attributes {
// CHECK-LABEL: @module_missing_extension_mm
func @module_missing_extension_mm() attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.0, [VulkanMemoryModel, PhysicalStorageBufferAddresses], [SPV_KHR_physical_storage_buffer]>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [VulkanMemoryModel, PhysicalStorageBufferAddresses], [SPV_KHR_physical_storage_buffer]>, {}>
} {
// CHECK: test.convert_to_module_op
"test.convert_to_module_op"() : () -> ()
@@ -173,7 +173,7 @@ func @module_missing_extension_mm() attributes {
// CHECK-LABEL: @module_missing_extension_am
func @module_missing_extension_am() attributes {
- spv.target_env = #spv.target_env<#spv.vce<v1.0, [VulkanMemoryModel, PhysicalStorageBufferAddresses], [SPV_KHR_vulkan_memory_model]>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [VulkanMemoryModel, PhysicalStorageBufferAddresses], [SPV_KHR_vulkan_memory_model]>, {}>
} {
// CHECK: test.convert_to_module_op
"test.convert_to_module_op"() : () -> ()
@@ -183,7 +183,7 @@ func @module_missing_extension_am() attributes {
// CHECK-LABEL: @module_implied_extension
func @module_implied_extension() attributes {
// Version 1.5 implies SPV_KHR_vulkan_memory_model and SPV_KHR_physical_storage_buffer.
- spv.target_env = #spv.target_env<#spv.vce<v1.5, [VulkanMemoryModel, PhysicalStorageBufferAddresses], []>, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ spv.target_env = #spv.target_env<#spv.vce<v1.5, [VulkanMemoryModel, PhysicalStorageBufferAddresses], []>, {}>
} {
// CHECK: spv.module PhysicalStorageBuffer64 Vulkan
"test.convert_to_module_op"() : () -> ()
diff --git a/mlir/test/mlir-vulkan-runner/addf.mlir b/mlir/test/mlir-vulkan-runner/addf.mlir
index 73622e37ade5..6cb7cdec3442 100644
--- a/mlir/test/mlir-vulkan-runner/addf.mlir
+++ b/mlir/test/mlir-vulkan-runner/addf.mlir
@@ -4,9 +4,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
gpu.module @kernels {
gpu.func @kernel_add(%arg0 : memref<8xf32>, %arg1 : memref<8xf32>, %arg2 : memref<8xf32>)
diff --git a/mlir/test/mlir-vulkan-runner/addi.mlir b/mlir/test/mlir-vulkan-runner/addi.mlir
index c690120718b2..696c5015565d 100644
--- a/mlir/test/mlir-vulkan-runner/addi.mlir
+++ b/mlir/test/mlir-vulkan-runner/addi.mlir
@@ -4,9 +4,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
gpu.module @kernels {
gpu.func @kernel_addi(%arg0 : memref<8xi32>, %arg1 : memref<8x8xi32>, %arg2 : memref<8x8x8xi32>)
diff --git a/mlir/test/mlir-vulkan-runner/addi8.mlir b/mlir/test/mlir-vulkan-runner/addi8.mlir
index 094186d5731d..eeb522285696 100644
--- a/mlir/test/mlir-vulkan-runner/addi8.mlir
+++ b/mlir/test/mlir-vulkan-runner/addi8.mlir
@@ -4,9 +4,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class, SPV_KHR_8bit_storage]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class, SPV_KHR_8bit_storage]>, {}>
} {
gpu.module @kernels {
gpu.func @kernel_addi(%arg0 : memref<8xi8>, %arg1 : memref<8x8xi8>, %arg2 : memref<8x8x8xi32>)
diff --git a/mlir/test/mlir-vulkan-runner/mulf.mlir b/mlir/test/mlir-vulkan-runner/mulf.mlir
index be0bd5afb425..0abcb53ebfe6 100644
--- a/mlir/test/mlir-vulkan-runner/mulf.mlir
+++ b/mlir/test/mlir-vulkan-runner/mulf.mlir
@@ -4,9 +4,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
gpu.module @kernels {
gpu.func @kernel_mul(%arg0 : memref<4x4xf32>, %arg1 : memref<4x4xf32>, %arg2 : memref<4x4xf32>)
diff --git a/mlir/test/mlir-vulkan-runner/subf.mlir b/mlir/test/mlir-vulkan-runner/subf.mlir
index 5fc7e0a91d29..77c1f8841e8b 100644
--- a/mlir/test/mlir-vulkan-runner/subf.mlir
+++ b/mlir/test/mlir-vulkan-runner/subf.mlir
@@ -4,9 +4,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
gpu.module @kernels {
gpu.func @kernel_sub(%arg0 : memref<8x4x4xf32>, %arg1 : memref<4x4xf32>, %arg2 : memref<8x4x4xf32>)
diff --git a/mlir/test/mlir-vulkan-runner/time.mlir b/mlir/test/mlir-vulkan-runner/time.mlir
index 9a96d7f819fd..21b4b76d1df0 100644
--- a/mlir/test/mlir-vulkan-runner/time.mlir
+++ b/mlir/test/mlir-vulkan-runner/time.mlir
@@ -7,9 +7,7 @@
module attributes {
gpu.container_module,
spv.target_env = #spv.target_env<
- #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>,
- {max_compute_workgroup_invocations = 128 : i32,
- max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}>
+ #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, {}>
} {
gpu.module @kernels {
gpu.func @kernel_add(%arg0 : memref<16384xf32>, %arg1 : memref<16384xf32>, %arg2 : memref<16384xf32>)
More information about the Mlir-commits
mailing list