[Mlir-commits] [mlir] [mlir][spirv] Add OpExtension "SPV_INTEL_tensor_float32_conversion" (PR #151337)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Fri Aug 1 07:50:28 PDT 2025
https://github.com/YixingZhang007 updated https://github.com/llvm/llvm-project/pull/151337
>From 02e73b2412b774cd4d7eae420801e21de24e7a7c Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Wed, 30 Jul 2025 06:46:00 -0700
Subject: [PATCH 01/14] add the mlir support for
SPV_INTEL_tensor_float32_conversion extension
---
.../mlir/Dialect/SPIRV/IR/SPIRVBase.td | 17 ++++--
.../mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 54 +++++++++++++++++++
mlir/lib/Dialect/SPIRV/IR/CastOps.cpp | 21 ++++++++
mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir | 36 +++++++++++++
mlir/test/Target/SPIRV/intel-ext-ops.mlir | 22 ++++++++
5 files changed, 147 insertions(+), 3 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 90383265002a3..9c9eefd054fa6 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -405,6 +405,7 @@ def SPV_INTEL_memory_access_aliasing : I32EnumAttrCase<"SPV_INTEL_me
def SPV_INTEL_split_barrier : I32EnumAttrCase<"SPV_INTEL_split_barrier", 4029>;
def SPV_INTEL_bfloat16_conversion : I32EnumAttrCase<"SPV_INTEL_bfloat16_conversion", 4031>;
def SPV_INTEL_cache_controls : I32EnumAttrCase<"SPV_INTEL_cache_controls", 4032>;
+def SPV_INTEL_tensor_float32_conversion : I32EnumAttrCase<"SPV_INTEL_tensor_float32_conversion", 4033>;
def SPV_NV_compute_shader_derivatives : I32EnumAttrCase<"SPV_NV_compute_shader_derivatives", 5000>;
def SPV_NV_cooperative_matrix : I32EnumAttrCase<"SPV_NV_cooperative_matrix", 5001>;
@@ -474,7 +475,8 @@ def SPIRV_ExtensionAttr :
SPV_NV_shader_image_footprint, SPV_NV_shader_sm_builtins,
SPV_NV_shader_subgroup_partitioned, SPV_NV_shading_rate,
SPV_NV_stereo_view_rendering, SPV_NV_viewport_array2, SPV_NV_bindless_texture,
- SPV_NV_ray_tracing_motion_blur, SPV_NVX_multiview_per_view_attributes
+ SPV_NV_ray_tracing_motion_blur, SPV_NVX_multiview_per_view_attributes,
+ SPV_INTEL_tensor_float32_conversion
]>;
//===----------------------------------------------------------------------===//
@@ -1465,6 +1467,12 @@ def SPIRV_C_Bfloat16ConversionINTEL : I32EnumAttrCase<"B
];
}
+def SPIRV_C_TensorFloat32RoundingINTEL : I32EnumAttrCase<"TensorFloat32RoundingINTEL", 6425> {
+ list<Availability> availability = [
+ Extension<[SPV_INTEL_tensor_float32_conversion]>
+ ];
+}
+
def SPIRV_C_CacheControlsINTEL : I32EnumAttrCase<"CacheControlsINTEL", 6441> {
list<Availability> availability = [
Extension<[SPV_INTEL_cache_controls]>
@@ -1567,7 +1575,8 @@ def SPIRV_CapabilityAttr :
SPIRV_C_ShaderViewportIndexLayerEXT, SPIRV_C_ShaderViewportMaskNV,
SPIRV_C_ShaderStereoViewNV, SPIRV_C_Bfloat16ConversionINTEL,
SPIRV_C_CacheControlsINTEL, SPIRV_C_BFloat16TypeKHR,
- SPIRV_C_BFloat16DotProductKHR, SPIRV_C_BFloat16CooperativeMatrixKHR
+ SPIRV_C_BFloat16DotProductKHR, SPIRV_C_BFloat16CooperativeMatrixKHR,
+ SPIRV_C_TensorFloat32RoundingINTEL
]>;
def SPIRV_AM_Logical : I32EnumAttrCase<"Logical", 0>;
@@ -4586,6 +4595,7 @@ def SPIRV_OC_OpControlBarrierArriveINTEL : I32EnumAttrCase<"OpControlBarrie
def SPIRV_OC_OpControlBarrierWaitINTEL : I32EnumAttrCase<"OpControlBarrierWaitINTEL", 6143>;
def SPIRV_OC_OpGroupIMulKHR : I32EnumAttrCase<"OpGroupIMulKHR", 6401>;
def SPIRV_OC_OpGroupFMulKHR : I32EnumAttrCase<"OpGroupFMulKHR", 6402>;
+def SPIRV_OC_OpRoundFToTF32INTEL : I32EnumAttrCase<"OpRoundFToTF32INTEL", 6426>;
def SPIRV_OpcodeAttr :
SPIRV_I32EnumAttr<"Opcode", "valid SPIR-V instructions", "opcode", [
@@ -4690,7 +4700,8 @@ def SPIRV_OpcodeAttr :
SPIRV_OC_OpAssumeTrueKHR, SPIRV_OC_OpAtomicFAddEXT,
SPIRV_OC_OpConvertFToBF16INTEL, SPIRV_OC_OpConvertBF16ToFINTEL,
SPIRV_OC_OpControlBarrierArriveINTEL, SPIRV_OC_OpControlBarrierWaitINTEL,
- SPIRV_OC_OpGroupIMulKHR, SPIRV_OC_OpGroupFMulKHR
+ SPIRV_OC_OpGroupIMulKHR, SPIRV_OC_OpGroupFMulKHR,
+ SPIRV_OC_OpRoundFToTF32INTEL
]>;
// End opcode section. Generated from SPIR-V spec; DO NOT MODIFY!
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index 82d26e365fb24..b692c07122683 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -11,6 +11,7 @@
// at (https://github.com/intel/llvm)
// Supported extensions
// * SPV_INTEL_bfloat16_conversion
+// * SPV_INTEL_tensor_float32_conversion
//===----------------------------------------------------------------------===//
@@ -110,6 +111,59 @@ def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", []> {
let hasVerifier = 1;
}
+// -----
+
+def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", []> {
+ let summary = "See extension SPV_INTEL_tensor_float32_conversion";
+
+ let description = [{
+ Convert value numerically from a 32-bit floating point type to tensor float32,
+ with rounding to the nearest even.
+
+ Result Type must be a scalar or vector of 32-bit floating-point type.
+ The component width must be 32 bits. The bit pattern in the Result represents a tensor float32 value.
+
+ Float Value must be a scalar or vector of floating-point type.
+ It must have the same number of components as Result Type. The component width must be 32 bits.
+
+ Results are computed per component.
+
+
+ ```
+ convert-f-to-tf32-op ::= ssa-id `=` `spirv.INTEL.RoundFToTF32` ssa-use
+ `:` operand-type `to` result-type
+ ```
+
+ #### Example:
+
+ ```mlir
+ %1 = spirv.INTEL.RoundFToTF32 %0 : f32 to f32
+ %3 = spirv.INTEL.RoundFToTF32 %2 : vector<3xf32> to vector<3xf32>
+ ```
+
+ }];
+
+
+ let availability = [
+ MinVersion<SPIRV_V_1_0>,
+ MaxVersion<SPIRV_V_1_6>,
+ Extension<[SPV_INTEL_tensor_float32_conversion]>,
+ Capability<[SPIRV_C_TensorFloat32RoundingINTEL]>
+ ];
+
+ let arguments = (ins
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
+ );
+
+ let results = (outs
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
+ );
+ let assemblyFormat = [{
+ $operand attr-dict `:` type($operand) `to` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
// -----
diff --git a/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp b/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
index e27dc274673be..fc3e7308356bf 100644
--- a/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
@@ -311,6 +311,27 @@ LogicalResult INTELConvertFToBF16Op::verify() {
return success();
}
+//===----------------------------------------------------------------------===//
+// spirv.INTELRoundFToTF32Op
+//===----------------------------------------------------------------------===//
+
+LogicalResult INTELRoundFToTF32Op::verify() {
+ auto operandType = getOperand().getType();
+ auto resultType = getResult().getType();
+ // ODS checks that vector result type and vector operand type have the same
+ // shape.
+ if (auto vectorType = llvm::dyn_cast<VectorType>(operandType)) {
+ unsigned operandNumElements = vectorType.getNumElements();
+ unsigned resultNumElements =
+ llvm::cast<VectorType>(resultType).getNumElements();
+ if (operandNumElements != resultNumElements) {
+ return emitOpError(
+ "operand and result must have same number of elements");
+ }
+ }
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// spirv.FConvertOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
index bb15d018a6c44..aa5bee5796cfa 100644
--- a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
@@ -72,6 +72,42 @@ spirv.func @bf16_to_f32_vec_unsupported(%arg0 : vector<2xi16>) "None" {
// -----
+//===----------------------------------------------------------------------===//
+// spirv.INTEL.RoundFToTF32
+//===----------------------------------------------------------------------===//
+
+spirv.func @f32_to_tf32(%arg0 : f32) "None" {
+ // CHECK: {{%.*}} = spirv.INTEL.RoundFToTF32 {{%.*}} : f32 to f32
+ %0 = spirv.INTEL.RoundFToTF32 %arg0 : f32 to f32
+ spirv.Return
+}
+
+// -----
+
+spirv.func @f32_to_tf32_vec(%arg0 : vector<2xf32>) "None" {
+ // CHECK: {{%.*}} = spirv.INTEL.RoundFToTF32 {{%.*}} : vector<2xf32> to vector<2xf32>
+ %0 = spirv.INTEL.RoundFToTF32 %arg0 : vector<2xf32> to vector<2xf32>
+ spirv.Return
+}
+
+// -----
+
+spirv.func @f32_to_tf32_unsupported(%arg0 : f64) "None" {
+ // expected-error @+1 {{operand #0 must be Float32 or vector of Float32 values of length 2/3/4/8/16, but got}}
+ %0 = spirv.INTEL.RoundFToTF32 %arg0 : f64 to f32
+ spirv.Return
+}
+
+// -----
+
+spirv.func @f32_to_tf32_vec_unsupported(%arg0 : vector<2xf32>) "None" {
+ // expected-error @+1 {{operand and result must have same number of elements}}
+ %0 = spirv.INTEL.RoundFToTF32 %arg0 : vector<2xf32> to vector<4xf32>
+ spirv.Return
+}
+
+// -----
+
//===----------------------------------------------------------------------===//
// spirv.INTEL.SplitBarrier
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Target/SPIRV/intel-ext-ops.mlir b/mlir/test/Target/SPIRV/intel-ext-ops.mlir
index 6d2fd324363c6..53cf8bf8fbd62 100644
--- a/mlir/test/Target/SPIRV/intel-ext-ops.mlir
+++ b/mlir/test/Target/SPIRV/intel-ext-ops.mlir
@@ -32,6 +32,28 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Bfloat16ConversionINTEL]
// -----
+//===----------------------------------------------------------------------===//
+// spirv.INTEL.RoundFToTF32
+//===----------------------------------------------------------------------===//
+
+spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [TensorFloat32RoundingINTEL], [SPV_INTEL_tensor_float32_conversion]> {
+ // CHECK-LABEL: @f32_to_tf32
+ spirv.func @f32_to_tf32(%arg0 : f32) "None" {
+ // CHECK: {{%.*}} = spirv.INTEL.RoundFToTF32 {{%.*}} : f32 to f32
+ %1 = spirv.INTEL.RoundFToTF32 %arg0 : f32 to f32
+ spirv.Return
+ }
+
+ // CHECK-LABEL: @f32_to_tf32_vec
+ spirv.func @f32_to_tf32_vec(%arg0 : vector<2xf32>) "None" {
+ // CHECK: {{%.*}} = spirv.INTEL.RoundFToTF32 {{%.*}} : vector<2xf32> to vector<2xf32>
+ %1 = spirv.INTEL.RoundFToTF32 %arg0 : vector<2xf32> to vector<2xf32>
+ spirv.Return
+ }
+}
+
+// -----
+
//===----------------------------------------------------------------------===//
// spirv.INTEL.SplitBarrier
//===----------------------------------------------------------------------===//
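A note on the semantics for reviewers: the op description above says the conversion rounds a 32-bit float to tensor float32 with rounding to the nearest even. The sketch below is illustrative only and is not part of the patch; it assumes the usual TF32 layout (the f32 sign and 8-bit exponent are kept, the mantissa is reduced to 10 explicit bits, and the value is still stored in an f32), which matches the op's f32 to f32 signature. The helper name and sample values are made up for the example.
```cpp
// Illustrative host-side emulation of round-to-nearest-even from f32 to
// TF32 precision (10 explicit mantissa bits), with the result kept in an
// f32, mirroring spirv.INTEL.RoundFToTF32. NaN/Inf payloads are not
// treated specially in this sketch.
#include <cstdint>
#include <cstdio>
#include <cstring>

float roundFToTF32(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  // TF32 discards the low 13 mantissa bits. Adding 0xFFF rounds up values
  // strictly above the halfway point; the extra +1 when the lowest kept
  // bit is set makes exact ties round to the even neighbour. Then truncate.
  bits += 0x00000FFFu + ((bits >> 13) & 1u);
  bits &= 0xFFFFE000u;
  float result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  // 1 + 2^-10 is representable in TF32 and is preserved;
  // 1 + 2^-11 is an exact tie and rounds down to the even value 1.0.
  std::printf("%.10f\n", roundFToTF32(1.0009765625f));  // 1.0009765625
  std::printf("%.10f\n", roundFToTF32(1.00048828125f)); // 1.0000000000
}
```
The patch itself only defines the op, its availability, and the dialect and serialization tests; the snippet is just to make the rounding rule concrete.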
>From 4f4bfd035051ae70373dec6e90d8f19fa728ef8d Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Wed, 30 Jul 2025 07:18:11 -0700
Subject: [PATCH 02/14] remove the grammar definition
---
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 6 ------
1 file changed, 6 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index b692c07122683..215d57532ca84 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -127,12 +127,6 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", []> {
It must have the same number of components as Result Type. The component width must be 32 bits.
Results are computed per component.
-
-
- ```
- convert-f-to-tf32-op ::= ssa-id `=` `spirv.INTEL.RoundFToTF32` ssa-use
- `:` operand-type `to` result-type
- ```
#### Example:
>From 0bbd2687ecdd2ca51ec53792fea5dadcc1383a68 Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Wed, 30 Jul 2025 08:36:55 -0700
Subject: [PATCH 03/14] modify CastOps.cpp to add vector non-scalable check
---
mlir/lib/Dialect/SPIRV/IR/CastOps.cpp | 51 ++++++++++++++++-----------
1 file changed, 30 insertions(+), 21 deletions(-)
diff --git a/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp b/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
index fc3e7308356bf..d3672220d7c03 100644
--- a/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
@@ -276,13 +276,16 @@ LogicalResult ConvertUToFOp::verify() {
LogicalResult INTELConvertBF16ToFOp::verify() {
auto operandType = getOperand().getType();
auto resultType = getResult().getType();
- // ODS checks that vector result type and vector operand type have the same
- // shape.
- if (auto vectorType = llvm::dyn_cast<VectorType>(operandType)) {
- unsigned operandNumElements = vectorType.getNumElements();
- unsigned resultNumElements =
- llvm::cast<VectorType>(resultType).getNumElements();
- if (operandNumElements != resultNumElements) {
+ // ODS checks that vector result type and vector operand type are
+ // non-scalable and have the same shape.
+ auto operandVectorType = dyn_cast<VectorType>(operandType);
+ auto resultVectorType = dyn_cast<VectorType>(resultType);
+ if (operandVectorType && resultVectorType) {
+ if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
+ return emitOpError("scalable vectors are not supported");
+ }
+ if (operandVectorType.getNumElements() !=
+ resultVectorType.getNumElements()) {
return emitOpError(
"operand and result must have same number of elements");
}
@@ -297,13 +300,16 @@ LogicalResult INTELConvertBF16ToFOp::verify() {
LogicalResult INTELConvertFToBF16Op::verify() {
auto operandType = getOperand().getType();
auto resultType = getResult().getType();
- // ODS checks that vector result type and vector operand type have the same
- // shape.
- if (auto vectorType = llvm::dyn_cast<VectorType>(operandType)) {
- unsigned operandNumElements = vectorType.getNumElements();
- unsigned resultNumElements =
- llvm::cast<VectorType>(resultType).getNumElements();
- if (operandNumElements != resultNumElements) {
+ // ODS checks that vector result type and vector operand type are
+ // non-scalable and have the same shape.
+ auto operandVectorType = dyn_cast<VectorType>(operandType);
+ auto resultVectorType = dyn_cast<VectorType>(resultType);
+ if (operandVectorType && resultVectorType) {
+ if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
+ return emitOpError("scalable vectors are not supported");
+ }
+ if (operandVectorType.getNumElements() !=
+ resultVectorType.getNumElements()) {
return emitOpError(
"operand and result must have same number of elements");
}
@@ -318,13 +324,16 @@ LogicalResult INTELConvertFToBF16Op::verify() {
LogicalResult INTELRoundFToTF32Op::verify() {
auto operandType = getOperand().getType();
auto resultType = getResult().getType();
- // ODS checks that vector result type and vector operand type have the same
- // shape.
- if (auto vectorType = llvm::dyn_cast<VectorType>(operandType)) {
- unsigned operandNumElements = vectorType.getNumElements();
- unsigned resultNumElements =
- llvm::cast<VectorType>(resultType).getNumElements();
- if (operandNumElements != resultNumElements) {
+ // ODS checks that vector result type and vector operand type are
+ // non-scalable and have the same shape.
+ auto operandVectorType = dyn_cast<VectorType>(operandType);
+ auto resultVectorType = dyn_cast<VectorType>(resultType);
+ if (operandVectorType && resultVectorType) {
+ if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
+ return emitOpError("scalable vectors are not supported");
+ }
+ if (operandVectorType.getNumElements() !=
+ resultVectorType.getNumElements()) {
return emitOpError(
"operand and result must have same number of elements");
}
>From 446149f885ef2efadfcdac583c8bbe5d7f1df88a Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Wed, 30 Jul 2025 13:38:55 -0700
Subject: [PATCH 04/14] use SameOperandsAndResultShape vector shape check
---
.../mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 6 ++--
mlir/lib/Dialect/SPIRV/IR/CastOps.cpp | 36 +++++--------------
mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir | 6 ++--
3 files changed, 15 insertions(+), 33 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index 215d57532ca84..62d5826e008b3 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -20,7 +20,7 @@
// -----
-def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", []> {
+def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", [SameOperandsAndResultShape]> {
let summary = "See extension SPV_INTEL_bfloat16_conversion";
let description = [{
@@ -68,7 +68,7 @@ def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", []> {
// -----
-def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", []> {
+def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", [SameOperandsAndResultShape]> {
let summary = "See extension SPV_INTEL_bfloat16_conversion";
let description = [{
@@ -113,7 +113,7 @@ def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", []> {
// -----
-def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", []> {
+def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperandsAndResultShape]> {
let summary = "See extension SPV_INTEL_tensor_float32_conversion";
let description = [{
diff --git a/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp b/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
index d3672220d7c03..d5f19ab710daa 100644
--- a/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
@@ -277,18 +277,12 @@ LogicalResult INTELConvertBF16ToFOp::verify() {
auto operandType = getOperand().getType();
auto resultType = getResult().getType();
// ODS checks that vector result type and vector operand type are
- // non-scalable and have the same shape.
- auto operandVectorType = dyn_cast<VectorType>(operandType);
- auto resultVectorType = dyn_cast<VectorType>(resultType);
- if (operandVectorType && resultVectorType) {
+ // non-scalable.
+ if (auto operandVectorType = dyn_cast<VectorType>(operandType)) {
+ auto resultVectorType = dyn_cast<VectorType>(resultType);
if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
return emitOpError("scalable vectors are not supported");
}
- if (operandVectorType.getNumElements() !=
- resultVectorType.getNumElements()) {
- return emitOpError(
- "operand and result must have same number of elements");
- }
}
return success();
}
@@ -301,18 +295,12 @@ LogicalResult INTELConvertFToBF16Op::verify() {
auto operandType = getOperand().getType();
auto resultType = getResult().getType();
// ODS checks that vector result type and vector operand type are
- // non-scalable and have the same shape.
- auto operandVectorType = dyn_cast<VectorType>(operandType);
- auto resultVectorType = dyn_cast<VectorType>(resultType);
- if (operandVectorType && resultVectorType) {
+ // non-scalable.
+ if (auto operandVectorType = dyn_cast<VectorType>(operandType)) {
+ auto resultVectorType = dyn_cast<VectorType>(resultType);
if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
return emitOpError("scalable vectors are not supported");
}
- if (operandVectorType.getNumElements() !=
- resultVectorType.getNumElements()) {
- return emitOpError(
- "operand and result must have same number of elements");
- }
}
return success();
}
@@ -325,18 +313,12 @@ LogicalResult INTELRoundFToTF32Op::verify() {
auto operandType = getOperand().getType();
auto resultType = getResult().getType();
// ODS checks that vector result type and vector operand type are
- // non-scalable and have the same shape.
- auto operandVectorType = dyn_cast<VectorType>(operandType);
- auto resultVectorType = dyn_cast<VectorType>(resultType);
- if (operandVectorType && resultVectorType) {
+ // non-scalable.
+ if (auto operandVectorType = dyn_cast<VectorType>(operandType)) {
+ auto resultVectorType = dyn_cast<VectorType>(resultType);
if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
return emitOpError("scalable vectors are not supported");
}
- if (operandVectorType.getNumElements() !=
- resultVectorType.getNumElements()) {
- return emitOpError(
- "operand and result must have same number of elements");
- }
}
return success();
}
diff --git a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
index aa5bee5796cfa..e3cce924802a3 100644
--- a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
@@ -29,7 +29,7 @@ spirv.func @f32_to_bf16_unsupported(%arg0 : f64) "None" {
// -----
spirv.func @f32_to_bf16_vec_unsupported(%arg0 : vector<2xf32>) "None" {
- // expected-error @+1 {{operand and result must have same number of elements}}
+ // expected-error @+1 {{op requires the same shape for all operands and results}}
%0 = spirv.INTEL.ConvertFToBF16 %arg0 : vector<2xf32> to vector<4xi16>
spirv.Return
}
@@ -65,7 +65,7 @@ spirv.func @bf16_to_f32_unsupported(%arg0 : i16) "None" {
// -----
spirv.func @bf16_to_f32_vec_unsupported(%arg0 : vector<2xi16>) "None" {
- // expected-error @+1 {{operand and result must have same number of elements}}
+ // expected-error @+1 {{op requires the same shape for all operands and results}}
%0 = spirv.INTEL.ConvertBF16ToF %arg0 : vector<2xi16> to vector<3xf32>
spirv.Return
}
@@ -101,7 +101,7 @@ spirv.func @f32_to_tf32_unsupported(%arg0 : f64) "None" {
// -----
spirv.func @f32_to_tf32_vec_unsupported(%arg0 : vector<2xf32>) "None" {
- // expected-error @+1 {{operand and result must have same number of elements}}
+ // expected-error @+1 {{op requires the same shape for all operands and results}}
%0 = spirv.INTEL.RoundFToTF32 %arg0 : vector<2xf32> to vector<4xf32>
spirv.Return
}
>From d12696c0fcc1e48df9e1043ecd881c4e4331e458 Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Wed, 30 Jul 2025 14:05:37 -0700
Subject: [PATCH 05/14] formatting
---
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 1 +
1 file changed, 1 insertion(+)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index 62d5826e008b3..805719bda770d 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -152,6 +152,7 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperand
let results = (outs
SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
);
+
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
>From fd900223a6b2c7513ff2cc73b1606fdd93dcdcfe Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Wed, 30 Jul 2025 14:32:00 -0700
Subject: [PATCH 06/14] formatting
---
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 9c9eefd054fa6..89ae6bba13149 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -469,14 +469,14 @@ def SPIRV_ExtensionAttr :
SPV_INTEL_debug_module, SPV_INTEL_fp_fast_math_mode,
SPV_INTEL_memory_access_aliasing, SPV_INTEL_split_barrier,
SPV_INTEL_bfloat16_conversion, SPV_INTEL_cache_controls,
+ SPV_INTEL_tensor_float32_conversion,
SPV_NV_compute_shader_derivatives, SPV_NV_cooperative_matrix,
SPV_NV_fragment_shader_barycentric, SPV_NV_geometry_shader_passthrough,
SPV_NV_ray_tracing, SPV_NV_sample_mask_override_coverage,
SPV_NV_shader_image_footprint, SPV_NV_shader_sm_builtins,
SPV_NV_shader_subgroup_partitioned, SPV_NV_shading_rate,
SPV_NV_stereo_view_rendering, SPV_NV_viewport_array2, SPV_NV_bindless_texture,
- SPV_NV_ray_tracing_motion_blur, SPV_NVX_multiview_per_view_attributes,
- SPV_INTEL_tensor_float32_conversion
+ SPV_NV_ray_tracing_motion_blur, SPV_NVX_multiview_per_view_attributes
]>;
//===----------------------------------------------------------------------===//
>From 9c078c1d6ad4e0fa564a88842b9e2407796a489f Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Thu, 31 Jul 2025 11:26:11 -0700
Subject: [PATCH 07/14] replace the verify function with
FixedVectorOfLengthAndType
---
.../mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 38 +++++++++----
mlir/lib/Dialect/SPIRV/IR/CastOps.cpp | 54 -------------------
mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir | 6 +--
3 files changed, 32 insertions(+), 66 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index 805719bda770d..7729703a1f4e7 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -53,17 +53,24 @@ def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", [SameOpe
];
let arguments = (ins
- SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
+ AnyTypeOf<[
+ SPIRV_Float32,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
+ ]>:$operand
);
let results = (outs
- SPIRV_ScalarOrVectorOf<SPIRV_Int16>:$result
+ AnyTypeOf<[
+ SPIRV_Int16,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Int16]>
+ ]>:$result
);
+
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
- let hasVerifier = 1;
+ let hasVerifier = 0;
}
// -----
@@ -98,17 +105,24 @@ def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", [SameOpe
];
let arguments = (ins
- SPIRV_ScalarOrVectorOf<SPIRV_Int16>:$operand
+ AnyTypeOf<[
+ SPIRV_Int16,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Int16]>
+ ]>:$operand
);
let results = (outs
- SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
+ AnyTypeOf<[
+ SPIRV_Float32,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
+ ]>:$result
);
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
- let hasVerifier = 1;
+
+ let hasVerifier = 0;
}
// -----
@@ -146,18 +160,24 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperand
];
let arguments = (ins
- SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
+ AnyTypeOf<[
+ SPIRV_Float32,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
+ ]>:$operand
);
let results = (outs
- SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
+ AnyTypeOf<[
+ SPIRV_Float32,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
+ ]>:$result
);
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
- let hasVerifier = 1;
+ let hasVerifier = 0;
}
// -----
diff --git a/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp b/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
index d5f19ab710daa..fcf4eb6fbcf60 100644
--- a/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/CastOps.cpp
@@ -269,60 +269,6 @@ LogicalResult ConvertUToFOp::verify() {
/*skipBitWidthCheck=*/true);
}
-//===----------------------------------------------------------------------===//
-// spirv.INTELConvertBF16ToFOp
-//===----------------------------------------------------------------------===//
-
-LogicalResult INTELConvertBF16ToFOp::verify() {
- auto operandType = getOperand().getType();
- auto resultType = getResult().getType();
- // ODS checks that vector result type and vector operand type are
- // non-scalable.
- if (auto operandVectorType = dyn_cast<VectorType>(operandType)) {
- auto resultVectorType = dyn_cast<VectorType>(resultType);
- if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
- return emitOpError("scalable vectors are not supported");
- }
- }
- return success();
-}
-
-//===----------------------------------------------------------------------===//
-// spirv.INTELConvertFToBF16Op
-//===----------------------------------------------------------------------===//
-
-LogicalResult INTELConvertFToBF16Op::verify() {
- auto operandType = getOperand().getType();
- auto resultType = getResult().getType();
- // ODS checks that vector result type and vector operand type are
- // non-scalable.
- if (auto operandVectorType = dyn_cast<VectorType>(operandType)) {
- auto resultVectorType = dyn_cast<VectorType>(resultType);
- if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
- return emitOpError("scalable vectors are not supported");
- }
- }
- return success();
-}
-
-//===----------------------------------------------------------------------===//
-// spirv.INTELRoundFToTF32Op
-//===----------------------------------------------------------------------===//
-
-LogicalResult INTELRoundFToTF32Op::verify() {
- auto operandType = getOperand().getType();
- auto resultType = getResult().getType();
- // ODS checks that vector result type and vector operand type are
- // non-scalable.
- if (auto operandVectorType = dyn_cast<VectorType>(operandType)) {
- auto resultVectorType = dyn_cast<VectorType>(resultType);
- if (operandVectorType.isScalable() || resultVectorType.isScalable()) {
- return emitOpError("scalable vectors are not supported");
- }
- }
- return success();
-}
-
//===----------------------------------------------------------------------===//
// spirv.FConvertOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
index e3cce924802a3..55153a78fba5b 100644
--- a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
@@ -21,7 +21,7 @@ spirv.func @f32_to_bf16_vec(%arg0 : vector<2xf32>) "None" {
// -----
spirv.func @f32_to_bf16_unsupported(%arg0 : f64) "None" {
- // expected-error @+1 {{operand #0 must be Float32 or vector of Float32 values of length 2/3/4/8/16, but got}}
+ // expected-error @+1 {{op operand #0 must be Float32 or fixed-length vector of Float32 values of length 2/3/4/8/16, but got 'f64'}}
%0 = spirv.INTEL.ConvertFToBF16 %arg0 : f64 to i16
spirv.Return
}
@@ -57,7 +57,7 @@ spirv.func @bf16_to_f32_vec(%arg0 : vector<2xi16>) "None" {
// -----
spirv.func @bf16_to_f32_unsupported(%arg0 : i16) "None" {
- // expected-error @+1 {{result #0 must be Float32 or vector of Float32 values of length 2/3/4/8/16, but got}}
+ // expected-error @+1 {{op result #0 must be Float32 or fixed-length vector of Float32 values of length 2/3/4/8/16, but got 'f16'}}
%0 = spirv.INTEL.ConvertBF16ToF %arg0 : i16 to f16
spirv.Return
}
@@ -93,7 +93,7 @@ spirv.func @f32_to_tf32_vec(%arg0 : vector<2xf32>) "None" {
// -----
spirv.func @f32_to_tf32_unsupported(%arg0 : f64) "None" {
- // expected-error @+1 {{operand #0 must be Float32 or vector of Float32 values of length 2/3/4/8/16, but got}}
+ // expected-error @+1 {{op operand #0 must be Float32 or fixed-length vector of Float32 values of length 2/3/4/8/16, but got 'f64'}}
%0 = spirv.INTEL.RoundFToTF32 %arg0 : f64 to f32
spirv.Return
}
>From cc8641e5519f58da1995e336b3a7a448a2e0f071 Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Thu, 31 Jul 2025 12:57:03 -0700
Subject: [PATCH 08/14] update SPIRV_VectorOf to use FixedVectorOfLengthAndType
---
.../mlir/Dialect/SPIRV/IR/SPIRVBase.td | 2 +-
.../mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 30 ++++---------------
2 files changed, 7 insertions(+), 25 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 89ae6bba13149..305a51aae050f 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -4286,7 +4286,7 @@ class SPIRV_MatrixOfType<list<Type> allowedTypes> :
"Matrix">;
class SPIRV_VectorOf<Type type> :
- VectorOfLengthAndType<[2, 3, 4, 8, 16], [type]>;
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [type]>;
class SPIRV_ScalarOrVectorOf<Type type> :
AnyTypeOf<[type, SPIRV_VectorOf<type>]>;
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index 7729703a1f4e7..abf373cf3c511 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -53,17 +53,11 @@ def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", [SameOpe
];
let arguments = (ins
- AnyTypeOf<[
- SPIRV_Float32,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
- ]>:$operand
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
);
let results = (outs
- AnyTypeOf<[
- SPIRV_Int16,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Int16]>
- ]>:$result
+ SPIRV_ScalarOrVectorOf<SPIRV_Int16>:$result
);
let assemblyFormat = [{
@@ -105,17 +99,11 @@ def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", [SameOpe
];
let arguments = (ins
- AnyTypeOf<[
- SPIRV_Int16,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Int16]>
- ]>:$operand
+ SPIRV_ScalarOrVectorOf<SPIRV_Int16>:$operand
);
let results = (outs
- AnyTypeOf<[
- SPIRV_Float32,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
- ]>:$result
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
);
let assemblyFormat = [{
@@ -160,17 +148,11 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperand
];
let arguments = (ins
- AnyTypeOf<[
- SPIRV_Float32,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
- ]>:$operand
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
);
let results = (outs
- AnyTypeOf<[
- SPIRV_Float32,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
- ]>:$result
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
);
let assemblyFormat = [{
>From ef0041741888d043270ce7b9dd9a7f140f46ddc8 Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Thu, 31 Jul 2025 13:07:33 -0700
Subject: [PATCH 09/14] nit changes
---
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 6 ------
1 file changed, 6 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index abf373cf3c511..6068a74962a49 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -63,8 +63,6 @@ def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", [SameOpe
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
-
- let hasVerifier = 0;
}
// -----
@@ -109,8 +107,6 @@ def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", [SameOpe
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
-
- let hasVerifier = 0;
}
// -----
@@ -158,8 +154,6 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperand
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
-
- let hasVerifier = 0;
}
// -----
>From efc2209b59fc8b5ae4b17f5afea622ef11922f85 Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Thu, 31 Jul 2025 13:20:14 -0700
Subject: [PATCH 10/14] add the hasVerifier = 0
---
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index 6068a74962a49..abf373cf3c511 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -63,6 +63,8 @@ def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", [SameOpe
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
+
+ let hasVerifier = 0;
}
// -----
@@ -107,6 +109,8 @@ def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", [SameOpe
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
+
+ let hasVerifier = 0;
}
// -----
@@ -154,6 +158,8 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperand
let assemblyFormat = [{
$operand attr-dict `:` type($operand) `to` type($result)
}];
+
+ let hasVerifier = 0;
}
// -----
>From 78103c1a5c69df25159d68f65e97e71bb29d496d Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Thu, 31 Jul 2025 14:25:54 -0700
Subject: [PATCH 11/14] resolve CI SPIRV test failure
---
.../mlir/Dialect/SPIRV/IR/SPIRVBase.td | 2 +-
.../test/Dialect/SPIRV/IR/arithmetic-ops.mlir | 22 +++++------
mlir/test/Dialect/SPIRV/IR/bit-ops.mlir | 6 +--
mlir/test/Dialect/SPIRV/IR/gl-ops.mlir | 38 +++++++++----------
mlir/test/Dialect/SPIRV/IR/group-ops.mlir | 2 +-
mlir/test/Dialect/SPIRV/IR/image-ops.mlir | 2 +-
.../SPIRV/IR/khr-cooperative-matrix-ops.mlir | 10 ++---
mlir/test/Dialect/SPIRV/IR/logical-ops.mlir | 2 +-
.../Dialect/SPIRV/IR/non-uniform-ops.mlir | 16 ++++----
mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir | 12 +++---
10 files changed, 56 insertions(+), 56 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 305a51aae050f..ce9077fec9d69 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -4883,5 +4883,5 @@ def SPIRV_FPFastMathModeAttr :
SPIRV_FPFMM_AllowRecip, SPIRV_FPFMM_Fast, SPIRV_FPFMM_AllowContractFastINTEL,
SPIRV_FPFMM_AllowReassocINTEL
]>;
-
+
#endif // MLIR_DIALECT_SPIRV_IR_BASE
diff --git a/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir b/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
index 3adafc15c79f6..55fc59fefa534 100644
--- a/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
@@ -13,7 +13,7 @@ func.func @fadd_scalar(%arg: f32) -> f32 {
// -----
func.func @fadd_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FAdd %arg, %arg : bf16
return %0 : bf16
}
@@ -33,7 +33,7 @@ func.func @fdiv_scalar(%arg: f32) -> f32 {
// -----
func.func @fdiv_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FDiv %arg, %arg : bf16
return %0 : bf16
}
@@ -53,7 +53,7 @@ func.func @fmod_scalar(%arg: f32) -> f32 {
// -----
func.func @fmod_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FMod %arg, %arg : bf16
return %0 : bf16
}
@@ -79,7 +79,7 @@ func.func @fmul_vector(%arg: vector<4xf32>) -> vector<4xf32> {
// -----
func.func @fmul_i32(%arg: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FMul %arg, %arg : i32
return %0 : i32
}
@@ -87,7 +87,7 @@ func.func @fmul_i32(%arg: i32) -> i32 {
// -----
func.func @fmul_bf16(%arg: bf16) -> bf16 {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FMul %arg, %arg : bf16
return %0 : bf16
}
@@ -95,7 +95,7 @@ func.func @fmul_bf16(%arg: bf16) -> bf16 {
// -----
func.func @fmul_bf16_vector(%arg: vector<4xbf16>) -> vector<4xbf16> {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FMul %arg, %arg : vector<4xbf16>
return %0 : vector<4xbf16>
}
@@ -103,7 +103,7 @@ func.func @fmul_bf16_vector(%arg: vector<4xbf16>) -> vector<4xbf16> {
// -----
func.func @fmul_tensor(%arg: tensor<4xf32>) -> tensor<4xf32> {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FMul %arg, %arg : tensor<4xf32>
return %0 : tensor<4xf32>
}
@@ -123,7 +123,7 @@ func.func @fnegate_scalar(%arg: f32) -> f32 {
// -----
func.func @fnegate_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FNegate %arg : bf16
return %0 : bf16
}
@@ -143,7 +143,7 @@ func.func @frem_scalar(%arg: f32) -> f32 {
// -----
func.func @frem_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FRem %arg, %arg : bf16
return %0 : bf16
}
@@ -163,7 +163,7 @@ func.func @fsub_scalar(%arg: f32) -> f32 {
// -----
func.func @fsub_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.FSub %arg, %arg : bf16
return %0 : bf16
}
@@ -348,7 +348,7 @@ func.func @dot(%arg0: vector<4xf32>, %arg1: vector<4xf32>) -> f16 {
// -----
func.func @dot(%arg0: vector<4xi32>, %arg1: vector<4xi32>) -> i32 {
- // expected-error @+1 {{'spirv.Dot' op operand #0 must be vector of 16/32/64-bit float or BFloat16 values of length 2/3/4/8/16}}
+ // expected-error @+1 {{'spirv.Dot' op operand #0 must be fixed-length vector of 16/32/64-bit float or BFloat16 values of length 2/3/4/8/16}}
%0 = spirv.Dot %arg0, %arg1 : vector<4xi32> -> i32
return %0 : i32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir b/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
index f3f0ebf60f468..58f566e4eb792 100644
--- a/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
@@ -137,7 +137,7 @@ func.func @bitwise_or_all_ones_vector(%arg: vector<3xi8>) -> vector<3xi8> {
// -----
func.func @bitwise_or_float(%arg0: f16, %arg1: f16) -> f16 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4}}
%0 = spirv.BitwiseOr %arg0, %arg1 : f16
return %0 : f16
}
@@ -165,7 +165,7 @@ func.func @bitwise_xor_vector(%arg: vector<4xi32>) -> vector<4xi32> {
// -----
func.func @bitwise_xor_float(%arg0: f16, %arg1: f16) -> f16 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4}}
%0 = spirv.BitwiseXor %arg0, %arg1 : f16
return %0 : f16
}
@@ -274,7 +274,7 @@ func.func @bitwise_and_zext_vector(%arg: vector<2xi8>) -> vector<2xi32> {
// -----
func.func @bitwise_and_float(%arg0: f16, %arg1: f16) -> f16 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4}}
%0 = spirv.BitwiseAnd %arg0, %arg1 : f16
return %0 : f16
}
diff --git a/mlir/test/Dialect/SPIRV/IR/gl-ops.mlir b/mlir/test/Dialect/SPIRV/IR/gl-ops.mlir
index 5c5d94c40e573..fd8a2ffbbddf9 100644
--- a/mlir/test/Dialect/SPIRV/IR/gl-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/gl-ops.mlir
@@ -19,7 +19,7 @@ func.func @expvec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @exp(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values}}
%2 = spirv.GL.Exp %arg0 : i32
return
}
@@ -27,7 +27,7 @@ func.func @exp(%arg0 : i32) -> () {
// -----
func.func @exp(%arg0 : vector<5xf32>) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values of length 2/3/4}}
%2 = spirv.GL.Exp %arg0 : vector<5xf32>
return
}
@@ -51,7 +51,7 @@ func.func @exp(%arg0 : i32) -> () {
// -----
func.func @exp_bf16(%arg0 : bf16) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values of length 2/3/4}}
%2 = spirv.GL.Exp %arg0 : bf16
return
}
@@ -101,7 +101,7 @@ func.func @iminmax(%arg0: i32, %arg1: i32) {
// -----
func.func @fmaxminbf16vec(%arg0 : vector<3xbf16>, %arg1 : vector<3xbf16>) {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%1 = spirv.GL.FMax %arg0, %arg1 : vector<3xbf16>
%2 = spirv.GL.FMin %arg0, %arg1 : vector<3xbf16>
return
@@ -499,7 +499,7 @@ func.func @frexp_struct_mismatch_type(%arg0 : f32) -> () {
// -----
func.func @frexp_struct_wrong_type(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%2 = spirv.GL.FrexpStruct %arg0 : i32 -> !spirv.struct<(i32, i32)>
return
}
@@ -614,7 +614,7 @@ func.func @findimsb_vector_i64(%arg0 : vector<3xi64>) -> () {
// -----
func.func @findimsb_error_scalar_float(%arg0 : f32) -> () {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/1}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/1}}
%2 = spirv.GL.FindILsb %arg0 : f32
return
}
@@ -640,7 +640,7 @@ func.func @findsmsb_vector(%arg0 : vector<3xi32>) -> () {
// -----
func.func @findsmsb_error_scalar_i64(%arg0 : i64) -> () {
- // expected-error @+1 {{operand #0 must be Int32 or vector of Int32}}
+ // expected-error @+1 {{operand #0 must be Int32 or fixed-length vector of Int32}}
%2 = spirv.GL.FindSMsb %arg0 : i64
return
}
@@ -666,7 +666,7 @@ func.func @findumsb_vector(%arg0 : vector<3xi32>) -> () {
// -----
func.func @findumsb(%arg0 : i64) -> () {
- // expected-error @+1 {{operand #0 must be Int32 or vector of Int32}}
+ // expected-error @+1 {{operand #0 must be Int32 or fixed-length vector of Int32}}
%2 = spirv.GL.FindUMsb %arg0 : i64
return
}
@@ -692,7 +692,7 @@ func.func @distance_vector(%arg0 : vector<3xf32>, %arg1 : vector<3xf32>) {
// -----
func.func @distance_invalid_type(%arg0 : i32, %arg1 : i32) {
- // expected-error @+1 {{'spirv.GL.Distance' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16}}
+ // expected-error @+1 {{'spirv.GL.Distance' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16}}
%0 = spirv.GL.Distance %arg0, %arg1 : i32, i32 -> f32
return
}
@@ -708,7 +708,7 @@ func.func @distance_arg_mismatch(%arg0 : vector<3xf32>, %arg1 : vector<4xf32>) {
// -----
func.func @distance_invalid_vector_size(%arg0 : vector<5xf32>, %arg1 : vector<5xf32>) {
- // expected-error @+1 {{'spirv.GL.Distance' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16}}
+ // expected-error @+1 {{'spirv.GL.Distance' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16}}
%0 = spirv.GL.Distance %arg0, %arg1 : vector<5xf32>, vector<5xf32> -> f32
return
}
@@ -736,7 +736,7 @@ func.func @cross(%arg0 : vector<3xf32>, %arg1 : vector<3xf32>) {
// -----
func.func @cross_invalid_type(%arg0 : vector<3xi32>, %arg1 : vector<3xi32>) {
- // expected-error @+1 {{'spirv.GL.Cross' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'vector<3xi32>'}}
+ // expected-error @+1 {{'spirv.GL.Cross' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'vector<3xi32>'}}
%0 = spirv.GL.Cross %arg0, %arg1 : vector<3xi32>
return
}
@@ -762,7 +762,7 @@ func.func @normalize_vector(%arg0 : vector<3xf32>) {
// -----
func.func @normalize_invalid_type(%arg0 : i32) {
- // expected-error @+1 {{'spirv.GL.Normalize' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{'spirv.GL.Normalize' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.GL.Normalize %arg0 : i32
return
}
@@ -788,7 +788,7 @@ func.func @reflect_vector(%arg0 : vector<3xf32>, %arg1 : vector<3xf32>) {
// -----
func.func @reflect_invalid_type(%arg0 : i32, %arg1 : i32) {
- // expected-error @+1 {{'spirv.GL.Reflect' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{'spirv.GL.Reflect' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.GL.Reflect %arg0, %arg1 : i32
return
}
@@ -814,7 +814,7 @@ func.func @fractvec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @fract_invalid_type(%arg0 : i32) {
- // expected-error @+1 {{'spirv.GL.Fract' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{'spirv.GL.Fract' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%0 = spirv.GL.Fract %arg0 : i32
return
}
@@ -840,7 +840,7 @@ func.func @log2vec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @log2_invalid_type(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values}}
%0 = spirv.GL.Log2 %arg0 : i32
return
}
@@ -866,7 +866,7 @@ func.func @tanhvec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @tanh_invalid_type(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values}}
%0 = spirv.GL.Tanh %arg0 : i32
return
}
@@ -892,7 +892,7 @@ func.func @exp2vec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @exp2_invalid_type(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values}}
%0 = spirv.GL.Exp2 %arg0 : i32
return
}
@@ -1022,7 +1022,7 @@ func.func @lengthvec(%arg0 : vector<3xf32>) -> () {
// -----
func.func @length_i32_in(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.GL.Length %arg0 : i32 -> f32
return
}
@@ -1038,7 +1038,7 @@ func.func @length_f16_in(%arg0 : f16) -> () {
// -----
func.func @length_i32vec_in(%arg0 : vector<3xi32>) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'vector<3xi32>'}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'vector<3xi32>'}}
%0 = spirv.GL.Length %arg0 : vector<3xi32> -> f32
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
index d9957ad804161..d7a4a6d92fcd3 100644
--- a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
@@ -49,7 +49,7 @@ func.func @group_broadcast_negative_scope(%value: f32, %localid: vector<3xi32> )
// -----
func.func @group_broadcast_negative_locid_dtype(%value: f32, %localid: vector<3xf32> ) -> f32 {
- // expected-error @+1 {{operand #1 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values}}
+ // expected-error @+1 {{op operand #1 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values}}
%0 = spirv.GroupBroadcast <Subgroup> %value, %localid : f32, vector<3xf32>
return %0: f32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/image-ops.mlir b/mlir/test/Dialect/SPIRV/IR/image-ops.mlir
index d3aaef7ebdef6..320a8fa360a5f 100644
--- a/mlir/test/Dialect/SPIRV/IR/image-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/image-ops.mlir
@@ -349,7 +349,7 @@ func.func @image_fetch_2d_result(%arg0: !spirv.image<f32, Dim2D, NoDepth, NonArr
// -----
func.func @image_fetch_float_coords(%arg0: !spirv.image<f32, Dim2D, NoDepth, NonArrayed, SingleSampled, NeedSampler, Rgba8>, %arg1: vector<2xf32>) -> () {
- // expected-error @+1 {{op operand #1 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'vector<2xf32>'}}
+ // expected-error @+1 {{op operand #1 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'vector<2xf32>'}}
%0 = spirv.ImageFetch %arg0, %arg1 : !spirv.image<f32, Dim2D, NoDepth, NonArrayed, SingleSampled, NeedSampler, Rgba8>, vector<2xf32> -> vector<2xf32>
spirv.Return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/khr-cooperative-matrix-ops.mlir b/mlir/test/Dialect/SPIRV/IR/khr-cooperative-matrix-ops.mlir
index 61a35b7c991ba..491c7a7758ce1 100644
--- a/mlir/test/Dialect/SPIRV/IR/khr-cooperative-matrix-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/khr-cooperative-matrix-ops.mlir
@@ -583,7 +583,7 @@ spirv.func @matrix_times_scalar(%a: !spirv.coopmatrix<2x2xf32, Workgroup, Matrix
// These binary arithmetic instructions do not support coop matrix operands.
spirv.func @fmod(%a: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16}}
%p = spirv.FMod %a, %b : !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>
spirv.Return
}
@@ -591,14 +591,14 @@ spirv.func @fmod(%a: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>, %b: !spirv.c
// -----
spirv.func @frem(%a: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16}}
%p = spirv.FRem %a, %b : !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>
spirv.Return
}
// -----
spirv.func @smod(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
%p = spirv.SMod %a, %b : !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>
spirv.Return
}
@@ -606,7 +606,7 @@ spirv.func @smod(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.c
// -----
spirv.func @srem(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
%p = spirv.SRem %a, %b : !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>
spirv.Return
}
@@ -614,7 +614,7 @@ spirv.func @srem(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.c
// -----
spirv.func @umod(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
%p = spirv.UMod %a, %b : !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>
spirv.Return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
index d6c34645f5746..1ba2c8e1b6d5b 100644
--- a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
@@ -166,7 +166,7 @@ func.func @logicalUnary(%arg0 : i1)
func.func @logicalUnary(%arg0 : i32)
{
- // expected-error @+1 {{'operand' must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{'operand' must be bool or fixed-length vector of bool values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.LogicalNot %arg0 : i32
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
index 7ab94f17360d5..bdb2abde8d8e6 100644
--- a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
@@ -185,7 +185,7 @@ func.func @group_non_uniform_fmul_clustered_reduce(%val: vector<2xf32>) -> vecto
// -----
func.func @group_non_uniform_bf16_fmul_reduce(%val: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'bf16'}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'bf16'}}
%0 = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %val : bf16 -> bf16
return %0: bf16
}
@@ -206,7 +206,7 @@ func.func @group_non_uniform_fmax_reduce(%val: f32) -> f32 {
// -----
func.func @group_non_uniform_bf16_fmax_reduce(%val: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'bf16'}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'bf16'}}
%0 = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %val : bf16 -> bf16
return %0: bf16
}
@@ -511,7 +511,7 @@ func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {
// -----
func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
%0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -532,7 +532,7 @@ func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {
// -----
func.func @group_non_uniform_bitwise_or(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
%0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -553,7 +553,7 @@ func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {
// -----
func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
%0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -574,7 +574,7 @@ func.func @group_non_uniform_logical_and(%val: i1) -> i1 {
// -----
func.func @group_non_uniform_logical_and(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{operand #0 must be bool or fixed-length vector of bool values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
@@ -595,7 +595,7 @@ func.func @group_non_uniform_logical_or(%val: i1) -> i1 {
// -----
func.func @group_non_uniform_logical_or(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{operand #0 must be bool or fixed-length vector of bool values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
@@ -616,7 +616,7 @@ func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {
// -----
func.func @group_non_uniform_logical_xor(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{operand #0 must be bool or fixed-length vector of bool values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir b/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
index 8f021ed3d663d..6aaaa6012fefe 100644
--- a/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
@@ -19,7 +19,7 @@ func.func @expvec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @exp(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%2 = spirv.CL.exp %arg0 : i32
return
}
@@ -27,7 +27,7 @@ func.func @exp(%arg0 : i32) -> () {
// -----
func.func @exp(%arg0 : vector<5xf32>) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4}}
%2 = spirv.CL.exp %arg0 : vector<5xf32>
return
}
@@ -75,7 +75,7 @@ func.func @fabsf64(%arg0 : f64) -> () {
// -----
func.func @fabs(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
%2 = spirv.CL.fabs %arg0 : i32
return
}
@@ -83,7 +83,7 @@ func.func @fabs(%arg0 : i32) -> () {
// -----
func.func @fabs(%arg0 : vector<5xf32>) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4}}
%2 = spirv.CL.fabs %arg0 : vector<5xf32>
return
}
@@ -137,7 +137,7 @@ func.func @sabsi8(%arg0 : i8) -> () {
// -----
func.func @sabs(%arg0 : f32) -> () {
- // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values}}
%2 = spirv.CL.s_abs %arg0 : f32
return
}
@@ -145,7 +145,7 @@ func.func @sabs(%arg0 : f32) -> () {
// -----
func.func @sabs(%arg0 : vector<5xi32>) -> () {
- // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4}}
%2 = spirv.CL.s_abs %arg0 : vector<5xi32>
return
}
>From cea605cf4e4829bda5ed21a4be210e9228442c09 Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Thu, 31 Jul 2025 14:32:07 -0700
Subject: [PATCH 12/14] nit change
---
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index ce9077fec9d69..305a51aae050f 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -4883,5 +4883,5 @@ def SPIRV_FPFastMathModeAttr :
SPIRV_FPFMM_AllowRecip, SPIRV_FPFMM_Fast, SPIRV_FPFMM_AllowContractFastINTEL,
SPIRV_FPFMM_AllowReassocINTEL
]>;
-
+
#endif // MLIR_DIALECT_SPIRV_IR_BASE
>From d679e948fb67aa4fefccc23b365249f86fb5b885 Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Fri, 1 Aug 2025 07:33:42 -0700
Subject: [PATCH 13/14] Revert the changes in FixedVectorOfLengthAndType
---
.../mlir/Dialect/SPIRV/IR/SPIRVBase.td | 2 +-
.../mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 31 +++++++++++----
.../test/Dialect/SPIRV/IR/arithmetic-ops.mlir | 22 +++++------
mlir/test/Dialect/SPIRV/IR/bit-ops.mlir | 6 +--
mlir/test/Dialect/SPIRV/IR/gl-ops.mlir | 38 +++++++++----------
mlir/test/Dialect/SPIRV/IR/group-ops.mlir | 2 +-
mlir/test/Dialect/SPIRV/IR/image-ops.mlir | 2 +-
.../SPIRV/IR/khr-cooperative-matrix-ops.mlir | 10 ++---
mlir/test/Dialect/SPIRV/IR/logical-ops.mlir | 2 +-
.../Dialect/SPIRV/IR/non-uniform-ops.mlir | 16 ++++----
mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir | 12 +++---
11 files changed, 80 insertions(+), 63 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 305a51aae050f..89ae6bba13149 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -4286,7 +4286,7 @@ class SPIRV_MatrixOfType<list<Type> allowedTypes> :
"Matrix">;
class SPIRV_VectorOf<Type type> :
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [type]>;
+ VectorOfLengthAndType<[2, 3, 4, 8, 16], [type]>;
class SPIRV_ScalarOrVectorOf<Type type> :
AnyTypeOf<[type, SPIRV_VectorOf<type>]>;
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index abf373cf3c511..d6b9636ae4b37 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -53,11 +53,17 @@ def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", [SameOpe
];
let arguments = (ins
- SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
+ AnyTypeOf<[
+ SPIRV_Float32,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
+ ]>:$operand
);
let results = (outs
- SPIRV_ScalarOrVectorOf<SPIRV_Int16>:$result
+ AnyTypeOf<[
+ SPIRV_Int16,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Int16]>
+ ]>:$result
);
let assemblyFormat = [{
@@ -99,11 +105,17 @@ def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", [SameOpe
];
let arguments = (ins
- SPIRV_ScalarOrVectorOf<SPIRV_Int16>:$operand
+ AnyTypeOf<[
+ SPIRV_Int16,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Int16]>
+ ]>:$operand
);
let results = (outs
- SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
+ AnyTypeOf<[
+ SPIRV_Float32,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
+ ]>:$result
);
let assemblyFormat = [{
@@ -139,7 +151,6 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperand
}];
-
let availability = [
MinVersion<SPIRV_V_1_0>,
MaxVersion<SPIRV_V_1_6>,
@@ -148,11 +159,17 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperand
];
let arguments = (ins
- SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
+ AnyTypeOf<[
+ SPIRV_Float32,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
+ ]>:$operand
);
let results = (outs
- SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
+ AnyTypeOf<[
+ SPIRV_Float32,
+ FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
+ ]>:$result
);
let assemblyFormat = [{
diff --git a/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir b/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
index 55fc59fefa534..3adafc15c79f6 100644
--- a/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
@@ -13,7 +13,7 @@ func.func @fadd_scalar(%arg: f32) -> f32 {
// -----
func.func @fadd_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FAdd %arg, %arg : bf16
return %0 : bf16
}
@@ -33,7 +33,7 @@ func.func @fdiv_scalar(%arg: f32) -> f32 {
// -----
func.func @fdiv_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FDiv %arg, %arg : bf16
return %0 : bf16
}
@@ -53,7 +53,7 @@ func.func @fmod_scalar(%arg: f32) -> f32 {
// -----
func.func @fmod_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FMod %arg, %arg : bf16
return %0 : bf16
}
@@ -79,7 +79,7 @@ func.func @fmul_vector(%arg: vector<4xf32>) -> vector<4xf32> {
// -----
func.func @fmul_i32(%arg: i32) -> i32 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FMul %arg, %arg : i32
return %0 : i32
}
@@ -87,7 +87,7 @@ func.func @fmul_i32(%arg: i32) -> i32 {
// -----
func.func @fmul_bf16(%arg: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FMul %arg, %arg : bf16
return %0 : bf16
}
@@ -95,7 +95,7 @@ func.func @fmul_bf16(%arg: bf16) -> bf16 {
// -----
func.func @fmul_bf16_vector(%arg: vector<4xbf16>) -> vector<4xbf16> {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FMul %arg, %arg : vector<4xbf16>
return %0 : vector<4xbf16>
}
@@ -103,7 +103,7 @@ func.func @fmul_bf16_vector(%arg: vector<4xbf16>) -> vector<4xbf16> {
// -----
func.func @fmul_tensor(%arg: tensor<4xf32>) -> tensor<4xf32> {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FMul %arg, %arg : tensor<4xf32>
return %0 : tensor<4xf32>
}
@@ -123,7 +123,7 @@ func.func @fnegate_scalar(%arg: f32) -> f32 {
// -----
func.func @fnegate_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FNegate %arg : bf16
return %0 : bf16
}
@@ -143,7 +143,7 @@ func.func @frem_scalar(%arg: f32) -> f32 {
// -----
func.func @frem_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FRem %arg, %arg : bf16
return %0 : bf16
}
@@ -163,7 +163,7 @@ func.func @fsub_scalar(%arg: f32) -> f32 {
// -----
func.func @fsub_bf16_scalar(%arg: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.FSub %arg, %arg : bf16
return %0 : bf16
}
@@ -348,7 +348,7 @@ func.func @dot(%arg0: vector<4xf32>, %arg1: vector<4xf32>) -> f16 {
// -----
func.func @dot(%arg0: vector<4xi32>, %arg1: vector<4xi32>) -> i32 {
- // expected-error @+1 {{'spirv.Dot' op operand #0 must be fixed-length vector of 16/32/64-bit float or BFloat16 values of length 2/3/4/8/16}}
+ // expected-error @+1 {{'spirv.Dot' op operand #0 must be vector of 16/32/64-bit float or BFloat16 values of length 2/3/4/8/16}}
%0 = spirv.Dot %arg0, %arg1 : vector<4xi32> -> i32
return %0 : i32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir b/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
index 58f566e4eb792..f3f0ebf60f468 100644
--- a/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
@@ -137,7 +137,7 @@ func.func @bitwise_or_all_ones_vector(%arg: vector<3xi8>) -> vector<3xi8> {
// -----
func.func @bitwise_or_float(%arg0: f16, %arg1: f16) -> f16 {
- // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
%0 = spirv.BitwiseOr %arg0, %arg1 : f16
return %0 : f16
}
@@ -165,7 +165,7 @@ func.func @bitwise_xor_vector(%arg: vector<4xi32>) -> vector<4xi32> {
// -----
func.func @bitwise_xor_float(%arg0: f16, %arg1: f16) -> f16 {
- // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
%0 = spirv.BitwiseXor %arg0, %arg1 : f16
return %0 : f16
}
@@ -274,7 +274,7 @@ func.func @bitwise_and_zext_vector(%arg: vector<2xi8>) -> vector<2xi32> {
// -----
func.func @bitwise_and_float(%arg0: f16, %arg1: f16) -> f16 {
- // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
%0 = spirv.BitwiseAnd %arg0, %arg1 : f16
return %0 : f16
}
diff --git a/mlir/test/Dialect/SPIRV/IR/gl-ops.mlir b/mlir/test/Dialect/SPIRV/IR/gl-ops.mlir
index fd8a2ffbbddf9..5c5d94c40e573 100644
--- a/mlir/test/Dialect/SPIRV/IR/gl-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/gl-ops.mlir
@@ -19,7 +19,7 @@ func.func @expvec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @exp(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values}}
%2 = spirv.GL.Exp %arg0 : i32
return
}
@@ -27,7 +27,7 @@ func.func @exp(%arg0 : i32) -> () {
// -----
func.func @exp(%arg0 : vector<5xf32>) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values of length 2/3/4}}
%2 = spirv.GL.Exp %arg0 : vector<5xf32>
return
}
@@ -51,7 +51,7 @@ func.func @exp(%arg0 : i32) -> () {
// -----
func.func @exp_bf16(%arg0 : bf16) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values of length 2/3/4}}
%2 = spirv.GL.Exp %arg0 : bf16
return
}
@@ -101,7 +101,7 @@ func.func @iminmax(%arg0: i32, %arg1: i32) {
// -----
func.func @fmaxminbf16vec(%arg0 : vector<3xbf16>, %arg1 : vector<3xbf16>) {
- // expected-error @+1 {{operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%1 = spirv.GL.FMax %arg0, %arg1 : vector<3xbf16>
%2 = spirv.GL.FMin %arg0, %arg1 : vector<3xbf16>
return
@@ -499,7 +499,7 @@ func.func @frexp_struct_mismatch_type(%arg0 : f32) -> () {
// -----
func.func @frexp_struct_wrong_type(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%2 = spirv.GL.FrexpStruct %arg0 : i32 -> !spirv.struct<(i32, i32)>
return
}
@@ -614,7 +614,7 @@ func.func @findimsb_vector_i64(%arg0 : vector<3xi64>) -> () {
// -----
func.func @findimsb_error_scalar_float(%arg0 : f32) -> () {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/1}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/1}}
%2 = spirv.GL.FindILsb %arg0 : f32
return
}
@@ -640,7 +640,7 @@ func.func @findsmsb_vector(%arg0 : vector<3xi32>) -> () {
// -----
func.func @findsmsb_error_scalar_i64(%arg0 : i64) -> () {
- // expected-error @+1 {{operand #0 must be Int32 or fixed-length vector of Int32}}
+ // expected-error @+1 {{operand #0 must be Int32 or vector of Int32}}
%2 = spirv.GL.FindSMsb %arg0 : i64
return
}
@@ -666,7 +666,7 @@ func.func @findumsb_vector(%arg0 : vector<3xi32>) -> () {
// -----
func.func @findumsb(%arg0 : i64) -> () {
- // expected-error @+1 {{operand #0 must be Int32 or fixed-length vector of Int32}}
+ // expected-error @+1 {{operand #0 must be Int32 or vector of Int32}}
%2 = spirv.GL.FindUMsb %arg0 : i64
return
}
@@ -692,7 +692,7 @@ func.func @distance_vector(%arg0 : vector<3xf32>, %arg1 : vector<3xf32>) {
// -----
func.func @distance_invalid_type(%arg0 : i32, %arg1 : i32) {
- // expected-error @+1 {{'spirv.GL.Distance' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16}}
+ // expected-error @+1 {{'spirv.GL.Distance' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16}}
%0 = spirv.GL.Distance %arg0, %arg1 : i32, i32 -> f32
return
}
@@ -708,7 +708,7 @@ func.func @distance_arg_mismatch(%arg0 : vector<3xf32>, %arg1 : vector<4xf32>) {
// -----
func.func @distance_invalid_vector_size(%arg0 : vector<5xf32>, %arg1 : vector<5xf32>) {
- // expected-error @+1 {{'spirv.GL.Distance' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16}}
+ // expected-error @+1 {{'spirv.GL.Distance' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16}}
%0 = spirv.GL.Distance %arg0, %arg1 : vector<5xf32>, vector<5xf32> -> f32
return
}
@@ -736,7 +736,7 @@ func.func @cross(%arg0 : vector<3xf32>, %arg1 : vector<3xf32>) {
// -----
func.func @cross_invalid_type(%arg0 : vector<3xi32>, %arg1 : vector<3xi32>) {
- // expected-error @+1 {{'spirv.GL.Cross' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'vector<3xi32>'}}
+ // expected-error @+1 {{'spirv.GL.Cross' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'vector<3xi32>'}}
%0 = spirv.GL.Cross %arg0, %arg1 : vector<3xi32>
return
}
@@ -762,7 +762,7 @@ func.func @normalize_vector(%arg0 : vector<3xf32>) {
// -----
func.func @normalize_invalid_type(%arg0 : i32) {
- // expected-error @+1 {{'spirv.GL.Normalize' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{'spirv.GL.Normalize' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.GL.Normalize %arg0 : i32
return
}
@@ -788,7 +788,7 @@ func.func @reflect_vector(%arg0 : vector<3xf32>, %arg1 : vector<3xf32>) {
// -----
func.func @reflect_invalid_type(%arg0 : i32, %arg1 : i32) {
- // expected-error @+1 {{'spirv.GL.Reflect' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{'spirv.GL.Reflect' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.GL.Reflect %arg0, %arg1 : i32
return
}
@@ -814,7 +814,7 @@ func.func @fractvec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @fract_invalid_type(%arg0 : i32) {
- // expected-error @+1 {{'spirv.GL.Fract' op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{'spirv.GL.Fract' op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%0 = spirv.GL.Fract %arg0 : i32
return
}
@@ -840,7 +840,7 @@ func.func @log2vec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @log2_invalid_type(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values}}
%0 = spirv.GL.Log2 %arg0 : i32
return
}
@@ -866,7 +866,7 @@ func.func @tanhvec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @tanh_invalid_type(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values}}
%0 = spirv.GL.Tanh %arg0 : i32
return
}
@@ -892,7 +892,7 @@ func.func @exp2vec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @exp2_invalid_type(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32-bit float or fixed-length vector of 16/32-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32-bit float or vector of 16/32-bit float values}}
%0 = spirv.GL.Exp2 %arg0 : i32
return
}
@@ -1022,7 +1022,7 @@ func.func @lengthvec(%arg0 : vector<3xf32>) -> () {
// -----
func.func @length_i32_in(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.GL.Length %arg0 : i32 -> f32
return
}
@@ -1038,7 +1038,7 @@ func.func @length_f16_in(%arg0 : f16) -> () {
// -----
func.func @length_i32vec_in(%arg0 : vector<3xi32>) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'vector<3xi32>'}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'vector<3xi32>'}}
%0 = spirv.GL.Length %arg0 : vector<3xi32> -> f32
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
index d7a4a6d92fcd3..d9957ad804161 100644
--- a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
@@ -49,7 +49,7 @@ func.func @group_broadcast_negative_scope(%value: f32, %localid: vector<3xi32> )
// -----
func.func @group_broadcast_negative_locid_dtype(%value: f32, %localid: vector<3xf32> ) -> f32 {
- // expected-error @+1 {{op operand #1 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values}}
+ // expected-error @+1 {{operand #1 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values}}
%0 = spirv.GroupBroadcast <Subgroup> %value, %localid : f32, vector<3xf32>
return %0: f32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/image-ops.mlir b/mlir/test/Dialect/SPIRV/IR/image-ops.mlir
index 320a8fa360a5f..d3aaef7ebdef6 100644
--- a/mlir/test/Dialect/SPIRV/IR/image-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/image-ops.mlir
@@ -349,7 +349,7 @@ func.func @image_fetch_2d_result(%arg0: !spirv.image<f32, Dim2D, NoDepth, NonArr
// -----
func.func @image_fetch_float_coords(%arg0: !spirv.image<f32, Dim2D, NoDepth, NonArrayed, SingleSampled, NeedSampler, Rgba8>, %arg1: vector<2xf32>) -> () {
- // expected-error @+1 {{op operand #1 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'vector<2xf32>'}}
+ // expected-error @+1 {{op operand #1 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'vector<2xf32>'}}
%0 = spirv.ImageFetch %arg0, %arg1 : !spirv.image<f32, Dim2D, NoDepth, NonArrayed, SingleSampled, NeedSampler, Rgba8>, vector<2xf32> -> vector<2xf32>
spirv.Return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/khr-cooperative-matrix-ops.mlir b/mlir/test/Dialect/SPIRV/IR/khr-cooperative-matrix-ops.mlir
index 491c7a7758ce1..61a35b7c991ba 100644
--- a/mlir/test/Dialect/SPIRV/IR/khr-cooperative-matrix-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/khr-cooperative-matrix-ops.mlir
@@ -583,7 +583,7 @@ spirv.func @matrix_times_scalar(%a: !spirv.coopmatrix<2x2xf32, Workgroup, Matrix
// These binary arithmetic instructions do not support coop matrix operands.
spirv.func @fmod(%a: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16}}
%p = spirv.FMod %a, %b : !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>
spirv.Return
}
@@ -591,14 +591,14 @@ spirv.func @fmod(%a: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>, %b: !spirv.c
// -----
spirv.func @frem(%a: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16}}
%p = spirv.FRem %a, %b : !spirv.coopmatrix<2x2xf32, Subgroup, MatrixA>
spirv.Return
}
// -----
spirv.func @smod(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
%p = spirv.SMod %a, %b : !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>
spirv.Return
}
@@ -606,7 +606,7 @@ spirv.func @smod(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.c
// -----
spirv.func @srem(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
%p = spirv.SRem %a, %b : !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>
spirv.Return
}
@@ -614,7 +614,7 @@ spirv.func @srem(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.c
// -----
spirv.func @umod(%a: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>, %b: !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>) "None" {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16}}
%p = spirv.UMod %a, %b : !spirv.coopmatrix<2x2xi32, Subgroup, MatrixA>
spirv.Return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
index 1ba2c8e1b6d5b..d6c34645f5746 100644
--- a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
@@ -166,7 +166,7 @@ func.func @logicalUnary(%arg0 : i1)
func.func @logicalUnary(%arg0 : i32)
{
- // expected-error @+1 {{'operand' must be bool or fixed-length vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{'operand' must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.LogicalNot %arg0 : i32
return
}
diff --git a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
index bdb2abde8d8e6..7ab94f17360d5 100644
--- a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
@@ -185,7 +185,7 @@ func.func @group_non_uniform_fmul_clustered_reduce(%val: vector<2xf32>) -> vecto
// -----
func.func @group_non_uniform_bf16_fmul_reduce(%val: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'bf16'}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'bf16'}}
%0 = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %val : bf16 -> bf16
return %0: bf16
}
@@ -206,7 +206,7 @@ func.func @group_non_uniform_fmax_reduce(%val: f32) -> f32 {
// -----
func.func @group_non_uniform_bf16_fmax_reduce(%val: bf16) -> bf16 {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'bf16'}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4/8/16, but got 'bf16'}}
%0 = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %val : bf16 -> bf16
return %0: bf16
}
@@ -511,7 +511,7 @@ func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {
// -----
func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
%0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -532,7 +532,7 @@ func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {
// -----
func.func @group_non_uniform_bitwise_or(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
%0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -553,7 +553,7 @@ func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {
// -----
func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 {
- // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
+ // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
%0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %val : i1 -> i1
return %0: i1
}
@@ -574,7 +574,7 @@ func.func @group_non_uniform_logical_and(%val: i1) -> i1 {
// -----
func.func @group_non_uniform_logical_and(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or fixed-length vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
@@ -595,7 +595,7 @@ func.func @group_non_uniform_logical_or(%val: i1) -> i1 {
// -----
func.func @group_non_uniform_logical_or(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or fixed-length vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
@@ -616,7 +616,7 @@ func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {
// -----
func.func @group_non_uniform_logical_xor(%val: i32) -> i32 {
- // expected-error @+1 {{operand #0 must be bool or fixed-length vector of bool values of length 2/3/4/8/16, but got 'i32'}}
+ // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
%0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %val : i32 -> i32
return %0: i32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir b/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
index 6aaaa6012fefe..8f021ed3d663d 100644
--- a/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
@@ -19,7 +19,7 @@ func.func @expvec(%arg0 : vector<3xf16>) -> () {
// -----
func.func @exp(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%2 = spirv.CL.exp %arg0 : i32
return
}
@@ -27,7 +27,7 @@ func.func @exp(%arg0 : i32) -> () {
// -----
func.func @exp(%arg0 : vector<5xf32>) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4}}
%2 = spirv.CL.exp %arg0 : vector<5xf32>
return
}
@@ -75,7 +75,7 @@ func.func @fabsf64(%arg0 : f64) -> () {
// -----
func.func @fabs(%arg0 : i32) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
%2 = spirv.CL.fabs %arg0 : i32
return
}
@@ -83,7 +83,7 @@ func.func @fabs(%arg0 : i32) -> () {
// -----
func.func @fabs(%arg0 : vector<5xf32>) -> () {
- // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or fixed-length vector of 16/32/64-bit float values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values of length 2/3/4}}
%2 = spirv.CL.fabs %arg0 : vector<5xf32>
return
}
@@ -137,7 +137,7 @@ func.func @sabsi8(%arg0 : i8) -> () {
// -----
func.func @sabs(%arg0 : f32) -> () {
- // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values}}
%2 = spirv.CL.s_abs %arg0 : f32
return
}
@@ -145,7 +145,7 @@ func.func @sabs(%arg0 : f32) -> () {
// -----
func.func @sabs(%arg0 : vector<5xi32>) -> () {
- // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or fixed-length vector of 8/16/32/64-bit integer values of length 2/3/4}}
+ // expected-error @+1 {{op operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
%2 = spirv.CL.s_abs %arg0 : vector<5xi32>
return
}
>From 18d0ffaf1544a08bdf7d83e7302216278f8c3571 Mon Sep 17 00:00:00 2001
From: "Zhang, Yixing" <yixing.zhang at intel.com>
Date: Fri, 1 Aug 2025 07:50:10 -0700
Subject: [PATCH 14/14] remove the fixed-length vector check for the
conversion extension
---
.../mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td | 30 ++++---------------
mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir | 6 ++--
2 files changed, 9 insertions(+), 27 deletions(-)
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
index d6b9636ae4b37..2a7fa534cc3dc 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVIntelExtOps.td
@@ -53,17 +53,11 @@ def SPIRV_INTELConvertFToBF16Op : SPIRV_IntelVendorOp<"ConvertFToBF16", [SameOpe
];
let arguments = (ins
- AnyTypeOf<[
- SPIRV_Float32,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
- ]>:$operand
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
);
let results = (outs
- AnyTypeOf<[
- SPIRV_Int16,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Int16]>
- ]>:$result
+ SPIRV_ScalarOrVectorOf<SPIRV_Int16>:$result
);
let assemblyFormat = [{
@@ -105,17 +99,11 @@ def SPIRV_INTELConvertBF16ToFOp : SPIRV_IntelVendorOp<"ConvertBF16ToF", [SameOpe
];
let arguments = (ins
- AnyTypeOf<[
- SPIRV_Int16,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Int16]>
- ]>:$operand
+ SPIRV_ScalarOrVectorOf<SPIRV_Int16>:$operand
);
let results = (outs
- AnyTypeOf<[
- SPIRV_Float32,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
- ]>:$result
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
);
let assemblyFormat = [{
@@ -159,17 +147,11 @@ def SPIRV_INTELRoundFToTF32Op : SPIRV_IntelVendorOp<"RoundFToTF32", [SameOperand
];
let arguments = (ins
- AnyTypeOf<[
- SPIRV_Float32,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
- ]>:$operand
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$operand
);
let results = (outs
- AnyTypeOf<[
- SPIRV_Float32,
- FixedVectorOfLengthAndType<[2, 3, 4, 8, 16], [SPIRV_Float32]>
- ]>:$result
+ SPIRV_ScalarOrVectorOf<SPIRV_Float32>:$result
);
let assemblyFormat = [{
diff --git a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
index 55153a78fba5b..2e356caa30f07 100644
--- a/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/intel-ext-ops.mlir
@@ -21,7 +21,7 @@ spirv.func @f32_to_bf16_vec(%arg0 : vector<2xf32>) "None" {
// -----
spirv.func @f32_to_bf16_unsupported(%arg0 : f64) "None" {
- // expected-error @+1 {{op operand #0 must be Float32 or fixed-length vector of Float32 values of length 2/3/4/8/16, but got 'f64'}}
+ // expected-error @+1 {{op operand #0 must be Float32 or vector of Float32 values of length 2/3/4/8/16, but got 'f64'}}
%0 = spirv.INTEL.ConvertFToBF16 %arg0 : f64 to i16
spirv.Return
}
@@ -57,7 +57,7 @@ spirv.func @bf16_to_f32_vec(%arg0 : vector<2xi16>) "None" {
// -----
spirv.func @bf16_to_f32_unsupported(%arg0 : i16) "None" {
- // expected-error @+1 {{op result #0 must be Float32 or fixed-length vector of Float32 values of length 2/3/4/8/16, but got 'f16'}}
+ // expected-error @+1 {{op result #0 must be Float32 or vector of Float32 values of length 2/3/4/8/16, but got 'f16'}}
%0 = spirv.INTEL.ConvertBF16ToF %arg0 : i16 to f16
spirv.Return
}
@@ -93,7 +93,7 @@ spirv.func @f32_to_tf32_vec(%arg0 : vector<2xf32>) "None" {
// -----
spirv.func @f32_to_tf32_unsupported(%arg0 : f64) "None" {
- // expected-error @+1 {{op operand #0 must be Float32 or fixed-length vector of Float32 values of length 2/3/4/8/16, but got 'f64'}}
+ // expected-error @+1 {{op operand #0 must be Float32 or vector of Float32 values of length 2/3/4/8/16, but got 'f64'}}
%0 = spirv.INTEL.RoundFToTF32 %arg0 : f64 to f32
spirv.Return
}
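For reference, a minimal usage sketch of the op after this final patch (with the SPIRV_ScalarOrVectorOf<SPIRV_Float32> operand/result constraints): it is illustrative only, not part of the patch series, and the function name @round_f32_to_tf32 is made up here; the valid scalar and vector forms are inferred from the positive tests in intel-ext-ops.mlir.

  // Illustrative sketch: RoundFToTF32 rounds f32 values to TF32 precision;
  // both the operand and the result are f32 (or a fixed-size f32 vector).
  spirv.func @round_f32_to_tf32(%arg0 : f32, %arg1 : vector<2xf32>) "None" {
    %0 = spirv.INTEL.RoundFToTF32 %arg0 : f32 to f32
    %1 = spirv.INTEL.RoundFToTF32 %arg1 : vector<2xf32> to vector<2xf32>
    spirv.Return
  }

Types such as f64 operands are rejected by the constraint, as exercised by the @f32_to_tf32_unsupported test above.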