[llvm-branch-commits] [mlir] 167fb9b - [mlir][spirv] Fix script for availability autogen and refresh ops
Lei Zhang via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Jan 22 10:12:26 PST 2021
Author: Lei Zhang
Date: 2021-01-22T13:07:36-05:00
New Revision: 167fb9b4b4352cdea92ccfdfb205c7ed4470d3ef
URL: https://github.com/llvm/llvm-project/commit/167fb9b4b4352cdea92ccfdfb205c7ed4470d3ef
DIFF: https://github.com/llvm/llvm-project/commit/167fb9b4b4352cdea92ccfdfb205c7ed4470d3ef.diff
LOG: [mlir][spirv] Fix script for availability autogen and refresh ops
Previously we only autogenerated availability for ops directly
instantiating `SPV_Op` and expected other subclasses of `SPV_Op`
to define aggregated availability for all their ops. This is
quite error-prone and we can miss capabilities for certain ops.
It's also arguable whether having multiple levels of subclasses
and deduplicating this much is worthwhile: having the availability
directly in the op is explicit and clear, and a few extra lines of
declarative code are fine.
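For ops whose availability is now spelled out directly, the
declaration takes the shape below (a minimal sketch following the
pattern this patch adds in the .td files; `SPV_SomeOp` and its
mnemonic are placeholder names, and the concrete version range and
capability list vary per op):

    def SPV_SomeOp : SPV_Op<"SomeOp", [NoSideEffect]> {
      let summary = "Placeholder op illustrating the per-op availability block.";

      // Availability is declared directly on each op rather than on an
      // intermediate base class, so the autogen script can always emit
      // and refresh it.
      let availability = [
        MinVersion<SPV_V_1_0>,
        MaxVersion<SPV_V_1_5>,
        Extension<[]>,
        Capability<[SPV_C_Kernel]>
      ];
    }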
Reviewed By: mravishankar
Differential Revision: https://reviews.llvm.org/D95236
Added:
Modified:
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
mlir/utils/spirv/gen_spirv_dialect.py
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td
index 5e3bf0b9eccd..c495650c77f1 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td
@@ -92,13 +92,6 @@ def SPV_BitCountOp : SPV_BitUnaryOp<"BitCount", []> {
%3 = spv.BitCount %1: vector<4xi32>
```
}];
-
- let availability = [
- MinVersion<SPV_V_1_0>,
- MaxVersion<SPV_V_1_5>,
- Extension<[]>,
- Capability<[]>
- ];
}
// -----
@@ -341,13 +334,6 @@ def SPV_BitwiseAndOp : SPV_BitBinaryOp<"BitwiseAnd",
%2 = spv.BitwiseAnd %0, %1 : vector<4xi32>
```
}];
-
- let availability = [
- MinVersion<SPV_V_1_0>,
- MaxVersion<SPV_V_1_5>,
- Extension<[]>,
- Capability<[]>
- ];
}
// -----
@@ -383,13 +369,6 @@ def SPV_BitwiseOrOp : SPV_BitBinaryOp<"BitwiseOr",
%2 = spv.BitwiseOr %0, %1 : vector<4xi32>
```
}];
-
- let availability = [
- MinVersion<SPV_V_1_0>,
- MaxVersion<SPV_V_1_5>,
- Extension<[]>,
- Capability<[]>
- ];
}
// -----
@@ -425,13 +404,6 @@ def SPV_BitwiseXorOp : SPV_BitBinaryOp<"BitwiseXor",
%2 = spv.BitwiseXor %0, %1 : vector<4xi32>
```
}];
-
- let availability = [
- MinVersion<SPV_V_1_0>,
- MaxVersion<SPV_V_1_5>,
- Extension<[]>,
- Capability<[]>
- ];
}
// -----
@@ -440,7 +412,7 @@ def SPV_ShiftLeftLogicalOp : SPV_ShiftOp<"ShiftLeftLogical",
[UsableInSpecConstantOp]> {
let summary = [{
Shift the bits in Base left by the number of bits specified in Shift.
- The least-significant bits will be zero filled.
+ The least-significant bits are zero filled.
}];
let description = [{
@@ -477,13 +449,6 @@ def SPV_ShiftLeftLogicalOp : SPV_ShiftOp<"ShiftLeftLogical",
%5 = spv.ShiftLeftLogical %3, %4 : vector<3xi32>, vector<3xi16>
```
}];
-
- let availability = [
- MinVersion<SPV_V_1_0>,
- MaxVersion<SPV_V_1_5>,
- Extension<[]>,
- Capability<[]>
- ];
}
// -----
@@ -492,7 +457,7 @@ def SPV_ShiftRightArithmeticOp : SPV_ShiftOp<"ShiftRightArithmetic",
[UsableInSpecConstantOp]> {
let summary = [{
Shift the bits in Base right by the number of bits specified in Shift.
- The most-significant bits will be filled with the sign bit from Base.
+ The most-significant bits are filled with the sign bit from Base.
}];
let description = [{
@@ -526,13 +491,6 @@ def SPV_ShiftRightArithmeticOp : SPV_ShiftOp<"ShiftRightArithmetic",
%5 = spv.ShiftRightArithmetic %3, %4 : vector<3xi32>, vector<3xi16>
```
}];
-
- let availability = [
- MinVersion<SPV_V_1_0>,
- MaxVersion<SPV_V_1_5>,
- Extension<[]>,
- Capability<[]>
- ];
}
// -----
@@ -541,7 +499,7 @@ def SPV_ShiftRightLogicalOp : SPV_ShiftOp<"ShiftRightLogical",
[UsableInSpecConstantOp]> {
let summary = [{
Shift the bits in Base right by the number of bits specified in Shift.
- The most-significant bits will be zero filled.
+ The most-significant bits are zero filled.
}];
let description = [{
@@ -576,13 +534,6 @@ def SPV_ShiftRightLogicalOp : SPV_ShiftOp<"ShiftRightLogical",
%5 = spv.ShiftRightLogical %3, %4 : vector<3xi32>, vector<3xi16>
```
}];
-
- let availability = [
- MinVersion<SPV_V_1_0>,
- MaxVersion<SPV_V_1_5>,
- Extension<[]>,
- Capability<[]>
- ];
}
// -----
@@ -595,7 +546,7 @@ def SPV_NotOp : SPV_BitUnaryOp<"Not", [UsableInSpecConstantOp]> {
Result Type must be a scalar or vector of integer type.
- Operand’s type must be a scalar or vector of integer type. It must
+ Operand's type must be a scalar or vector of integer type. It must
have the same number of components as Result Type. The component width
must equal the component width in Result Type.
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
index e384dac65acb..d73217117027 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
@@ -21,8 +21,7 @@ include "mlir/Interfaces/SideEffectInterfaces.td"
def SPV_CompositeConstructOp : SPV_Op<"CompositeConstruct", [NoSideEffect]> {
let summary = [{
- Construct a new composite object from a set of constituent objects that
- will fully form it.
+ Construct a new composite object from a set of constituent objects.
}];
let description = [{
@@ -217,13 +216,12 @@ def SPV_VectorExtractDynamicOp : SPV_Op<"VectorExtractDynamic",
let results = (outs
SPV_Scalar:$result
);
-
+
let verifier = [{ return success(); }];
let assemblyFormat = [{
$vector `[` $index `]` attr-dict `:` type($vector) `,` type($index)
}];
-
}
// -----
@@ -281,7 +279,7 @@ def SPV_VectorInsertDynamicOp : SPV_Op<"VectorInsertDynamic",
let results = (outs
SPV_Vector:$result
);
-
+
let verifier = [{ return success(); }];
let assemblyFormat = [{
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
index 8cad962de5d3..02643e19304e 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
@@ -20,8 +20,8 @@
def SPV_GroupBroadcastOp : SPV_Op<"GroupBroadcast",
[NoSideEffect, AllTypesMatch<["value", "result"]>]> {
let summary = [{
- Return the Value of the invocation identified by the local id LocalId to
- all invocations in the group.
+ Broadcast the Value of the invocation identified by the local id LocalId
+ to the result of all invocations in the group.
}];
let description = [{
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
index 81cdbcb65ac0..0516e70f87c4 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
@@ -719,6 +719,13 @@ def SPV_OrderedOp : SPV_LogicalBinaryOp<"Ordered", SPV_Float, [Commutative]> {
%5 = spv.Ordered %2, %3 : vector<4xf32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_0>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_Kernel]>
+ ];
}
// -----
@@ -1073,6 +1080,13 @@ def SPV_UnorderedOp : SPV_LogicalBinaryOp<"Unordered", SPV_Float, [Commutative]>
%5 = spv.Unordered %2, %3 : vector<4xf32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_0>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_Kernel]>
+ ];
}
// -----
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td
index 0f009edd5830..b55afaa7fe69 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td
@@ -62,9 +62,11 @@ def SPV_MatrixTimesMatrixOp : SPV_Op<"MatrixTimesMatrix", [NoSideEffect]> {
let results = (outs
SPV_AnyMatrix:$result
);
+
let assemblyFormat = [{
operands attr-dict `:` type($leftmatrix) `,` type($rightmatrix) `->` type($result)
}];
+
let verifier = [{ return verifyMatrixTimesMatrix(*this); }];
}
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td
index ef296668cbc3..dc14c8b9f41e 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td
@@ -87,8 +87,8 @@ def SPV_CopyMemoryOp : SPV_Op<"CopyMemory", []> {
Target. Both operands must be non-void pointers and having the same <id>
Type operand in their OpTypePointer type declaration. Matching Storage
Class is not required. The amount of memory copied is the size of the
- type pointed to. The copied type must have a fixed size; i.e., it cannot
- be, nor include, any OpTypeRuntimeArray types.
+ type pointed to. The copied type must have a fixed size; i.e., it must
+ not be, nor include, any OpTypeRuntimeArray types.
}];
let description = [{
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
index 68f3ad3171a8..0bea875e65fc 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
@@ -17,15 +17,6 @@
class SPV_GroupNonUniformArithmeticOp<string mnemonic, Type type,
list<OpTrait> traits = []> : SPV_Op<mnemonic, traits> {
- let availability = [
- MinVersion<SPV_V_1_3>,
- MaxVersion<SPV_V_1_5>,
- Extension<[]>,
- Capability<[SPV_C_GroupNonUniformArithmetic,
- SPV_C_GroupNonUniformClustered,
- SPV_C_GroupNonUniformPartitionedNV]>
- ];
-
let arguments = (ins
SPV_ScopeAttr:$execution_scope,
SPV_GroupOperationAttr:$group_operation,
@@ -47,7 +38,7 @@ class SPV_GroupNonUniformArithmeticOp<string mnemonic, Type type,
def SPV_GroupNonUniformBallotOp : SPV_Op<"GroupNonUniformBallot", []> {
let summary = [{
- Returns a bitfield value combining the Predicate value from all
+ Result is a bitfield value combining the Predicate value from all
invocations in the group that execute the same dynamic instance of this
instruction. The bit is set to one if the corresponding invocation is
active and the Predicate for that invocation evaluated to true;
@@ -108,8 +99,8 @@ def SPV_GroupNonUniformBallotOp : SPV_Op<"GroupNonUniformBallot", []> {
def SPV_GroupNonUniformBroadcastOp : SPV_Op<"GroupNonUniformBroadcast",
[NoSideEffect, AllTypesMatch<["value", "result"]>]> {
let summary = [{
- Return the Value of the invocation identified by the id Id to all active
- invocations in the group.
+ Result is the Value of the invocation identified by the id Id to all
+ active invocations in the group.
}];
let description = [{
@@ -224,8 +215,7 @@ def SPV_GroupNonUniformElectOp : SPV_Op<"GroupNonUniformElect", []> {
// -----
-def SPV_GroupNonUniformFAddOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformFAdd", SPV_Float, []> {
+def SPV_GroupNonUniformFAddOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformFAdd", SPV_Float, []> {
let summary = [{
A floating point add group operation of all Value operands contributed
by active invocations in the group.
@@ -271,12 +261,18 @@ def SPV_GroupNonUniformFAddOp :
%1 = spv.GroupNonUniformFAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformFMaxOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformFMax", SPV_Float, []> {
+def SPV_GroupNonUniformFMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformFMax", SPV_Float, []> {
let summary = [{
A floating point maximum group operation of all Value operands
contributed by active invocations in by group.
@@ -325,12 +321,18 @@ def SPV_GroupNonUniformFMaxOp :
%1 = spv.GroupNonUniformFMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformFMinOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformFMin", SPV_Float, []> {
+def SPV_GroupNonUniformFMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformFMin", SPV_Float, []> {
let summary = [{
A floating point minimum group operation of all Value operands
contributed by active invocations in the group.
@@ -379,12 +381,18 @@ def SPV_GroupNonUniformFMinOp :
%1 = spv.GroupNonUniformFMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformFMulOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformFMul", SPV_Float, []> {
+def SPV_GroupNonUniformFMulOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformFMul", SPV_Float, []> {
let summary = [{
A floating point multiply group operation of all Value operands
contributed by active invocations in the group.
@@ -430,12 +438,18 @@ def SPV_GroupNonUniformFMulOp :
%1 = spv.GroupNonUniformFMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformIAddOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformIAdd", SPV_Integer, []> {
+def SPV_GroupNonUniformIAddOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformIAdd", SPV_Integer, []> {
let summary = [{
An integer add group operation of all Value operands contributed by
active invocations in the group.
@@ -479,12 +493,18 @@ def SPV_GroupNonUniformIAddOp :
%1 = spv.GroupNonUniformIAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformIMulOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformIMul", SPV_Integer, []> {
+def SPV_GroupNonUniformIMulOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformIMul", SPV_Integer, []> {
let summary = [{
An integer multiply group operation of all Value operands contributed by
active invocations in the group.
@@ -528,12 +548,18 @@ def SPV_GroupNonUniformIMulOp :
%1 = spv.GroupNonUniformIMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformSMaxOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformSMax",
+def SPV_GroupNonUniformSMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformSMax",
SPV_Integer,
[SignedOp]> {
let summary = [{
@@ -579,12 +605,18 @@ def SPV_GroupNonUniformSMaxOp :
%1 = spv.GroupNonUniformSMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformSMinOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformSMin",
+def SPV_GroupNonUniformSMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformSMin",
SPV_Integer,
[SignedOp]> {
let summary = [{
@@ -630,12 +662,18 @@ def SPV_GroupNonUniformSMinOp :
%1 = spv.GroupNonUniformSMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformUMaxOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformUMax",
+def SPV_GroupNonUniformUMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformUMax",
SPV_Integer,
[UnsignedOp]> {
let summary = [{
@@ -682,12 +720,18 @@ def SPV_GroupNonUniformUMaxOp :
%1 = spv.GroupNonUniformUMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
-def SPV_GroupNonUniformUMinOp :
- SPV_GroupNonUniformArithmeticOp<"GroupNonUniformUMin",
+def SPV_GroupNonUniformUMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniformUMin",
SPV_Integer,
[UnsignedOp]> {
let summary = [{
@@ -734,6 +778,13 @@ def SPV_GroupNonUniformUMinOp :
%1 = spv.GroupNonUniformUMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
```
}];
+
+ let availability = [
+ MinVersion<SPV_V_1_3>,
+ MaxVersion<SPV_V_1_5>,
+ Extension<[]>,
+ Capability<[SPV_C_GroupNonUniformArithmetic, SPV_C_GroupNonUniformClustered, SPV_C_GroupNonUniformPartitionedNV]>
+ ];
}
// -----
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
index d4bf2861c960..25341e79cd81 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
@@ -607,7 +607,10 @@ def SPV_SpecConstantOp : SPV_Op<"specConstant", [InModuleScope, Symbol]> {
let autogenSerialization = 0;
}
-def SPV_SpecConstantCompositeOp : SPV_Op<"specConstantComposite", [InModuleScope, Symbol]> {
+// -----
+
+def SPV_SpecConstantCompositeOp : SPV_Op<"specConstantComposite", [
+ InModuleScope, Symbol]> {
let summary = "Declare a new composite specialization constant.";
let description = [{
@@ -658,43 +661,7 @@ def SPV_SpecConstantCompositeOp : SPV_Op<"specConstantComposite", [InModuleScope
let autogenSerialization = 0;
}
-def SPV_YieldOp : SPV_Op<"mlir.yield", [
- HasParent<"SpecConstantOperationOp">, NoSideEffect, Terminator]> {
- let summary = [{
- Yields the result computed in `spv.SpecConstantOperation`'s
- region back to the parent op.
- }];
-
- let description = [{
- This op is a special terminator whose only purpose is to terminate
- an `spv.SpecConstantOperation`'s enclosed region. It accepts a
- single operand produced by the preceeding (and only other) instruction
- in its parent block (see SPV_SpecConstantOperation for further
- details). This op has no corresponding SPIR-V instruction.
-
- ```
- spv.mlir.yield ::= `spv.mlir.yield` ssa-id : spirv-type
- ```
-
- #### Example:
- ```mlir
- %0 = ... (some op supported by SPIR-V OpSpecConstantOp)
- spv.mlir.yield %0
- ```
- }];
-
- let arguments = (ins AnyType:$operand);
-
- let results = (outs);
-
- let hasOpcode = 0;
-
- let autogenSerialization = 0;
-
- let assemblyFormat = "attr-dict $operand `:` type($operand)";
-
- let verifier = [{ return success(); }];
-}
+// -----
def SPV_SpecConstantOperationOp : SPV_Op<"SpecConstantOperation", [
NoSideEffect, InFunctionScope,
@@ -784,4 +751,44 @@ def SPV_SpecConstantOperationOp : SPV_Op<"SpecConstantOperation", [
// -----
+def SPV_YieldOp : SPV_Op<"mlir.yield", [
+ HasParent<"SpecConstantOperationOp">, NoSideEffect, Terminator]> {
+ let summary = [{
+ Yields the result computed in `spv.SpecConstantOperation`'s
+ region back to the parent op.
+ }];
+
+ let description = [{
+ This op is a special terminator whose only purpose is to terminate
+ an `spv.SpecConstantOperation`'s enclosed region. It accepts a
+ single operand produced by the preceeding (and only other) instruction
+ in its parent block (see SPV_SpecConstantOperation for further
+ details). This op has no corresponding SPIR-V instruction.
+
+ ```
+ spv.mlir.yield ::= `spv.mlir.yield` ssa-id : spirv-type
+ ```
+
+ #### Example:
+ ```mlir
+ %0 = ... (some op supported by SPIR-V OpSpecConstantOp)
+ spv.mlir.yield %0
+ ```
+ }];
+
+ let arguments = (ins AnyType:$operand);
+
+ let results = (outs);
+
+ let hasOpcode = 0;
+
+ let autogenSerialization = 0;
+
+ let assemblyFormat = "attr-dict $operand `:` type($operand)";
+
+ let verifier = [{ return success(); }];
+}
+
+// -----
+
#endif // MLIR_DIALECT_SPIRV_IR_STRUCTURE_OPS
diff --git a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
index 252bc3eb5095..a33db1dd42cf 100644
--- a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir
@@ -256,10 +256,18 @@ func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
return
}
+} // end module
+
+// -----
+
//===----------------------------------------------------------------------===//
// std.cmpf
//===----------------------------------------------------------------------===//
+module attributes {
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
+} {
+
// CHECK-LABEL: @cmpf
func @cmpf(%arg0 : f32, %arg1 : f32) {
// CHECK: spv.FOrdEqual
@@ -286,17 +294,38 @@ func @cmpf(%arg0 : f32, %arg1 : f32) {
%11 = cmpf ule, %arg0, %arg1 : f32
// CHECK: spv.FUnordNotEqual
%12 = cmpf une, %arg0, %arg1 : f32
+ return
+}
+
+} // end module
+
+// -----
+
+module attributes {
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel], []>, {}>
+} {
+
+// CHECK-LABEL: @cmpf
+func @cmpf(%arg0 : f32, %arg1 : f32) {
// CHECK: spv.Ordered
- %13 = cmpf ord, %arg0, %arg1 : f32
+ %0 = cmpf ord, %arg0, %arg1 : f32
// CHECK: spv.Unordered
- %14 = cmpf uno, %arg0, %arg1 : f32
+ %1 = cmpf uno, %arg0, %arg1 : f32
return
}
+} // end module
+
+// -----
+
//===----------------------------------------------------------------------===//
// std.cmpi
//===----------------------------------------------------------------------===//
+module attributes {
+ spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, {}>
+} {
+
// CHECK-LABEL: @cmpi
func @cmpi(%arg0 : i32, %arg1 : i32) {
// CHECK: spv.IEqual
diff --git a/mlir/utils/spirv/gen_spirv_dialect.py b/mlir/utils/spirv/gen_spirv_dialect.py
index 159d8651223b..ce1bff031bed 100755
--- a/mlir/utils/spirv/gen_spirv_dialect.py
+++ b/mlir/utils/spirv/gen_spirv_dialect.py
@@ -722,13 +722,9 @@ def get_op_definition(instruction, doc, existing_info, capability_mapping):
operands = instruction.get('operands', [])
# Op availability
- avail = ''
- # We assume other instruction categories has a base availability spec, so
- # only add this if this is directly using SPV_Op as the base.
- if inst_category == 'Op':
- avail = get_availability_spec(instruction, capability_mapping, True, False)
- if avail:
- avail = '\n\n {0}'.format(avail)
+ avail = get_availability_spec(instruction, capability_mapping, True, False)
+ if avail:
+ avail = '\n\n {0}'.format(avail)
# Set op's result
results = ''