[Mlir-commits] [mlir] [MLIR][RISCV] Add VCIX dialect (PR #74780)
Kolya Panchenko
llvmlistbot at llvm.org
Thu Dec 7 15:24:27 PST 2023
https://github.com/nikolaypanchenko created https://github.com/llvm/llvm-project/pull/74780
The changeset adds a new dialect called VCIX to support VCIX intrinsics of the XSfvcp extension, allowing MLIR users to interact with co-processors that are compatible with that extension.
Source: https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
>From 99f02eb6c0302ee591a8b2c5efbb638c610cb625 Mon Sep 17 00:00:00 2001
From: Kolya Panchenko <kolya.panchenko at sifive.com>
Date: Mon, 6 Nov 2023 09:40:17 -0800
Subject: [PATCH] [MLIR][RISCV] Add VCIX dialect
The changeset adds a new dialect called VCIX to support VCIX intrinsics of the
XSfvcp extension, allowing MLIR users to interact with co-processors that are
compatible with that extension.
Source: https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
---
mlir/include/mlir/Conversion/Passes.td | 5 +-
mlir/include/mlir/Dialect/CMakeLists.txt | 1 +
mlir/include/mlir/Dialect/VCIX/CMakeLists.txt | 8 +
mlir/include/mlir/Dialect/VCIX/VCIX.td | 305 ++++++++++
mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h | 20 +
mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td | 91 +++
mlir/include/mlir/Dialect/VCIX/VCIXDialect.h | 28 +
mlir/include/mlir/InitAllDialects.h | 2 +
mlir/lib/Dialect/CMakeLists.txt | 1 +
mlir/lib/Dialect/VCIX/CMakeLists.txt | 1 +
mlir/lib/Dialect/VCIX/IR/CMakeLists.txt | 16 +
mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp | 12 +
mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp | 28 +
mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp | 174 ++++++
mlir/test/Dialect/VCIX/invalid.mlir | 57 ++
mlir/test/Dialect/VCIX/ops.mlir | 531 ++++++++++++++++++
16 files changed, 1279 insertions(+), 1 deletion(-)
create mode 100644 mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIX.td
create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
create mode 100644 mlir/lib/Dialect/VCIX/CMakeLists.txt
create mode 100644 mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
create mode 100644 mlir/test/Dialect/VCIX/invalid.mlir
create mode 100644 mlir/test/Dialect/VCIX/ops.mlir
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 06756ff3df0bb3..c03b0137e6e0f5 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -1294,6 +1294,10 @@ def ConvertVectorToLLVMPass : Pass<"convert-vector-to-llvm"> {
"bool", /*default=*/"false",
"Enables the use of ArmSVE dialect while lowering the vector "
"dialect.">,
+ Option<"vcix", "enable-vcix",
+ "bool", /*default=*/"false",
+ "Enables the use of VCIX dialect while lowering the vector "
+ "dialect to RISC-V target">,
Option<"x86Vector", "enable-x86vector",
"bool", /*default=*/"false",
"Enables the use of X86Vector dialect while lowering the vector "
@@ -1310,5 +1314,4 @@ def ConvertVectorToSPIRV : Pass<"convert-vector-to-spirv"> {
let constructor = "mlir::createConvertVectorToSPIRVPass()";
let dependentDialects = ["spirv::SPIRVDialect"];
}
-
#endif // MLIR_CONVERSION_PASSES
diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt
index 1c4569ecfa5848..1408ced218dbb2 100644
--- a/mlir/include/mlir/Dialect/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/CMakeLists.txt
@@ -37,5 +37,6 @@ add_subdirectory(Tosa)
add_subdirectory(Transform)
add_subdirectory(UB)
add_subdirectory(Utils)
+add_subdirectory(VCIX)
add_subdirectory(Vector)
add_subdirectory(X86Vector)
diff --git a/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt b/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
new file mode 100644
index 00000000000000..2ed490283b3519
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
@@ -0,0 +1,8 @@
+add_mlir_dialect(VCIX vcix)
+add_mlir_doc(VCIXOps VCIXOps Dialects/ -gen-dialect-doc -dialect=vcix)
+
+set(LLVM_TARGET_DEFINITIONS VCIXAttrs.td)
+mlir_tablegen(VCIXDialectEnums.h.inc -gen-enum-decls)
+mlir_tablegen(VCIXDialectEnums.cpp.inc -gen-enum-defs)
+add_public_tablegen_target(MLIRVCIXDialectEnumIncGen)
+add_dependencies(mlir-headers MLIRVCIXDialectEnumIncGen)
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIX.td b/mlir/include/mlir/Dialect/VCIX/VCIX.td
new file mode 100644
index 00000000000000..2b5bbeb22e97fc
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIX.td
@@ -0,0 +1,305 @@
+//===-- VCIX.td - VCIX dialect operation definitions *- tablegen -*--------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// The file defines the basic operations for the VCIX dialect.
+//
+// The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+// to extend application processors with custom coprocessors and
+// variable-latency arithmetic units. The interface offers throughput comparable
+// to that of standard RISC-V vector instructions. To accelerate performance,
+// system designers may use VCIX as a low-latency, high-throughput interface to
+// a coprocessor.
+//
+// https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+//
+//===----------------------------------------------------------------------===//
+#ifndef VCIX
+#define VCIX
+
+include "mlir/IR/EnumAttr.td"
+include "mlir/IR/OpBase.td"
+include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+
+include "mlir/Dialect/VCIX/VCIXAttrs.td"
+
+//===----------------------------------------------------------------------===//
+// VCIX dialect definition.
+//===----------------------------------------------------------------------===//
+
+def VCIX_Dialect : Dialect {
+ let name = "vcix";
+ let cppNamespace = "::mlir::vcix";
+ let description = [{
+ The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+ to extend application processors with custom coprocessors and
+ variable-latency arithmetic units. The interface offers throughput comparable
+ to that of standard RISC-V vector instructions. To accelerate performance,
+ system designers may use VCIX as a low-latency, high-throughput interface to
+ a coprocessor.
+
+ https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+ }];
+
+ let usePropertiesForAttributes = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// VCIX Ops
+//===----------------------------------------------------------------------===//
+class VCIX_Op<string mnemonic, list<Trait> traits = []>
+ : Op<VCIX_Dialect, mnemonic, traits> {}
+
+class VCIX_IntrinOp<string mnemonic, list<Trait> traits = []>
+ : LLVM_OpBase<VCIX_Dialect, "intrin." #mnemonic, traits> {}
+
+//===----------------------------------------------------------------------===//
+// Unary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_UnaryROOp : VCIX_Op<"unary.ro", []> {
+ let summary = "Unary VCIX operation with side effects and without result";
+ let description = [{
+ Unary VCIX operation that has some side effects and does not produce result
+
+ Corresponds to
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.x 0000-- 1 ----- xs1 100 ----- none scalar xs1
+ sf.vc.i 0000-- 1 ----- simm 011 ----- none simm[4:0]
+ ```
+ }];
+
+ let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+ RVLType: $rvl,
+ OpcodeIntAttr: $opcode,
+ VCIX_SewLmulAttr: $sew_lmul,
+ RAttr: $rs2,
+ RAttr: $rd);
+
+ let assemblyFormat = [{
+ $sew_lmul $op `,` $rvl attr-dict `:` `(` type($op) `,` type($rvl) `)`
+ }];
+
+ let hasVerifier = 1;
+}
+
+def VCIX_UnaryOp : VCIX_Op<"unary", []> {
+ let summary = "unary VCIX operation";
+ let description = [{
+ Unary VCIX operation that produces result
+
+ Corresponds to
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.x 0000-- 0 ----- xs1 100 vd vector vd scalar xs1
+ sf.vc.v.i 0000-- 0 ----- simm 011 vd vector vd simm[4:0]
+ ```
+ }];
+
+ let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode,
+ RAttr: $rs2);
+
+ let results = (outs VectorOfRank1: $result);
+
+ let assemblyFormat = [{
+ $op (`,` $rvl^)? attr-dict
+ `:` `(` type($op) (`,` type($rvl)^)? `)` `->` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Binary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_BinaryROOp : VCIX_Op<"binary.ro", []> {
+ let summary = "Read-only binary VCIX operation";
+ let description = [{
+ Read-only binary VCIX operation that does not produce result
+
+ Corresponds to
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.vv 0010-- 1 vs2 vs1 000 ----- none vector vs1, vector vs2
+ sf.vc.v.xv 0010-- 1 vs2 xs1 100 ----- none scalar xs1, vector vs2
+ sf.vc.v.iv 0010-- 1 vs2 simm 011 ----- none simm[4:0], vector vs2
+ sf.vc.v.fv 0010-- 1 vs2 fs1 101 ----- none scalar fs1, vector vs2
+ ```
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode,
+ RAttr: $rd);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) (`,` type($rvl)^)? `)`
+ }];
+
+ let hasVerifier = 1;
+}
+
+def VCIX_BinaryOp : VCIX_Op<"binary", []> {
+ let summary = "binary VCIX operation";
+ let description = [{
+ Binary VCIX operation that produces result
+
+ Corresponds to
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.vv 0010-- 0 vs2 vs1 000 vd vector vd vector vs1, vector vs2
+ sf.vc.v.xv 0010-- 0 vs2 xs1 100 vd vector vd scalar xs1, vector vs2
+ sf.vc.v.iv 0010-- 0 vs2 simm 011 vd vector vd simm[4:0], vector vs2
+ sf.vc.v.fv 0010-- 0 vs2 fs1 101 vd vector vd scalar fs1, vector vs2
+ ```
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let results = (outs VectorOfRank1: $result);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) (`,` type($rvl)^)? `)` `->` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Ternary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_TernaryROOp : VCIX_Op<"ternary.ro", []> {
+ let summary = "Read-only ternary VCIX operation";
+ let description = [{
+ Ternary VCIX operation that does not generate result
+
+ Corresponds to
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.vvv 1010-- 1 vs2 vs1 000 vd none vector vs1, vector vs2, vector vd
+ sf.vc.xvv 1010-- 1 vs2 xs1 100 vd none scalar xs1, vector vs2, vector vd
+ sf.vc.ivv 1010-- 1 vs2 simm 011 vd none simm[4:0], vector vs2, vector vd
+ sf.vc.fvv 10101- 1 vs2 fs1 101 vd none scalar fs1, vector vs2, vector vd
+ ```
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)`
+ }];
+
+ let hasVerifier = 1;
+}
+
+def VCIX_TernaryOp : VCIX_Op<"ternary", []> {
+ let summary = "Ternary VCIX operation";
+ let description = [{
+ Ternary VCIX operation that produces result
+
+ Corresponds to
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.vvv 1010-- 0 vs2 vs1 000 vd vector vd vector vs1, vector vs2, vector vd
+ sf.vc.v.xvv 1010-- 0 vs2 xs1 100 vd vector vd scalar xs1, vector vs2, vector vd
+ sf.vc.v.ivv 1010-- 0 vs2 simm 011 vd vector vd simm[4:0], vector vs2, vector vd
+ sf.vc.v.fvv 10101- 0 vs2 fs1 101 vd vector vd scalar fs1, vector vs2, vector vd
+ ```
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let results = (outs VectorOfRank1: $result);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)` `->` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Wide ternary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_WideTernaryROOp : VCIX_Op<"wide.ternary.ro", []> {
+ let summary = "Read-only wide ternary VCIX operation";
+ let description = [{
+ Wide Ternary VCIX operation that does not produce result
+
+ Corresponds to
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.vvw 1111-- 1 vs2 vs1 000 vd none vector vs1, vector vs2, wide vd
+ sf.vc.xvw 1111-- 1 vs2 xs1 100 vd none scalar xs1, vector vs2, wide vd
+ sf.vc.ivw 1111-- 1 vs2 simm 011 vd none simm[4:0], vector vs2, wide vd
+ sf.vc.fvw 11111- 1 vs2 fs1 101 vd none scalar fs1, vector vs2, wide vd
+ ```
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)`
+ }];
+
+ let hasVerifier = 1;
+}
+
+def VCIX_WideTernaryOp : VCIX_Op<"wide.ternary", []> {
+ let summary = "Wide ternary VCIX operation";
+ let description = [{
+ Wide Ternary VCIX operation that produces result
+
+ Corresponds to
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.vvw 1111-- 0 vs2 vs1 000 vd wide vd vector vs1, vector vs2, wide vd
+ sf.vc.v.xvw 1111-- 0 vs2 xs1 100 vd wide vd scalar xs1, vector vs2, wide vd
+ sf.vc.v.ivw 1111-- 0 vs2 simm 011 vd wide vd simm[4:0], vector vs2, wide vd
+ sf.vc.v.fvw 11111- 0 vs2 fs1 101 vd wide vd scalar fs1, vector vs2, wide vd
+ ```
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let results = (outs VectorOfRank1: $result);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)` `->` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
+#endif // VCIX
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
new file mode 100644
index 00000000000000..95b66ee2b9a924
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
@@ -0,0 +1,20 @@
+//===- VCIXAttr.h - VCIX Dialect Attribute Definition -*- C++ -----------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VCIX_TRANSFORMATTRS_H
+#define MLIR_DIALECT_VCIX_TRANSFORMATTRS_H
+
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BuiltinAttributes.h"
+
+#include <cstdint>
+#include <optional>
+
+#include "mlir/Dialect/VCIX/VCIXDialectEnums.h.inc"
+
+#endif // MLIR_DIALECT_VCIX_TRANSFORMATTRS_H
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
new file mode 100644
index 00000000000000..7f3ecc9a3f2d3c
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
@@ -0,0 +1,91 @@
+//===- VCIXAttrs.td - VCIX dialect attributes ----*- tablegen -----------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef MLIR_DIALECT_VCIX_VCIXATTRS
+#define MLIR_DIALECT_VCIX_VCIXATTRS
+
+include "mlir/IR/EnumAttr.td"
+
+//===----------------------------------------------------------------------===//
+// VCIX helper type definitions
+//===----------------------------------------------------------------------===//
+def VectorOfRank1 : AnyTypeOf<[ScalableVectorOfRank<[1]>, VectorOfRank<[1]>]>;
+def VectorOfRank1OrScalar
+ : AnyTypeOf<[VectorOfRank1, I<64>, I<32>, F<16>, F<32>, F<64>, I<5>]>;
+def OpcodeI1Attr : AnyIntegerAttrBase<AnyI<1>, "1-bit integer attribute">;
+def OpcodeI2Attr : AnyIntegerAttrBase<AnyI<2>, "2-bit integer attribute">;
+def OpcodeIntAttr : AnyAttrOf<[OpcodeI1Attr, OpcodeI2Attr]>;
+def RAttr : AnyAttrOf<[AnyIntegerAttrBase<AnyI<5>, "5-bit integer attribute">]>;
+def RVLType : AnyTypeOf<[UI<64>, UI<32>]>;
+
+// Special version for intrinsic version where int attr is zext to i32 or i64
+// depending on xlen of the target
+def VectorOfRank1OrScalarIntrin
+ : AnyTypeOf<[VectorOfRank1, I<64>, I<32>, F<16>, F<32>, F<64>]>;
+def OpcodeIntrinIntAttr : AnyAttrOf<[I64Attr, I32Attr]>;
+def RIntrinAttr : AnyAttrOf<[I64Attr, I32Attr]>;
+def RVLIntrinType : AnyTypeOf<[I<64>, I<32>]>;
+
+def VCIX_e8mf8 : I32EnumAttrCase<"e8mf8", 0, "e8mf8">;
+def VCIX_e8mf4 : I32EnumAttrCase<"e8mf4", 1, "e8mf4">;
+def VCIX_e8mf2 : I32EnumAttrCase<"e8mf2", 2, "e8mf2">;
+def VCIX_e8m1 : I32EnumAttrCase<"e8m1", 3, "e8m1">;
+def VCIX_e8m2 : I32EnumAttrCase<"e8m2", 4, "e8m2">;
+def VCIX_e8m4 : I32EnumAttrCase<"e8m4", 5, "e8m4">;
+def VCIX_e8m8 : I32EnumAttrCase<"e8m8", 6, "e8m8">;
+
+def VCIX_e16mf4 : I32EnumAttrCase<"e16mf4", 7, "e16mf4">;
+def VCIX_e16mf2 : I32EnumAttrCase<"e16mf2", 8, "e16mf2">;
+def VCIX_e16m1 : I32EnumAttrCase<"e16m1", 9, "e16m1">;
+def VCIX_e16m2 : I32EnumAttrCase<"e16m2", 10, "e16m2">;
+def VCIX_e16m4 : I32EnumAttrCase<"e16m4", 11, "e16m4">;
+def VCIX_e16m8 : I32EnumAttrCase<"e16m8", 12, "e16m8">;
+
+def VCIX_e32mf2 : I32EnumAttrCase<"e32mf2", 13, "e32mf2">;
+def VCIX_e32m1 : I32EnumAttrCase<"e32m1", 14, "e32m1">;
+def VCIX_e32m2 : I32EnumAttrCase<"e32m2", 15, "e32m2">;
+def VCIX_e32m4 : I32EnumAttrCase<"e32m4", 16, "e32m4">;
+def VCIX_e32m8 : I32EnumAttrCase<"e32m8", 17, "e32m8">;
+
+def VCIX_e64m1 : I32EnumAttrCase<"e64m1", 18, "e64m1">;
+def VCIX_e64m2 : I32EnumAttrCase<"e64m2", 19, "e64m2">;
+def VCIX_e64m4 : I32EnumAttrCase<"e64m4", 20, "e64m4">;
+def VCIX_e64m8 : I32EnumAttrCase<"e64m8", 21, "e64m8">;
+
+def VCIX_SewLmulAttr : I32EnumAttr<"SewLmul",
+ "A list of all possible SEW and LMUL",
+ [
+ VCIX_e8mf8,
+ VCIX_e8mf4,
+ VCIX_e8mf2,
+ VCIX_e8m1,
+ VCIX_e8m2,
+ VCIX_e8m4,
+ VCIX_e8m8,
+
+ VCIX_e16mf4,
+ VCIX_e16mf2,
+ VCIX_e16m1,
+ VCIX_e16m2,
+ VCIX_e16m4,
+ VCIX_e16m8,
+
+ VCIX_e32mf2,
+ VCIX_e32m1,
+ VCIX_e32m2,
+ VCIX_e32m4,
+ VCIX_e32m8,
+
+ VCIX_e64m1,
+ VCIX_e64m2,
+ VCIX_e64m4,
+ VCIX_e64m8,
+ ]> {
+ let cppNamespace = "::mlir::vcix";
+}
+
+#endif // MLIR_DIALECT_VCIX_VCIXATTRS
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h b/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
new file mode 100644
index 00000000000000..0e795f42f58dee
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
@@ -0,0 +1,28 @@
+//===- VCIXDialect.h - MLIR Dialect for VCIX --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Target dialect for VCIX in MLIR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VCIX_VCIXDIALECT_H_
+#define MLIR_DIALECT_VCIX_VCIXDIALECT_H_
+
+#include "mlir/Bytecode/BytecodeOpInterface.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Dialect.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
+
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h.inc"
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/VCIX/VCIX.h.inc"
+
+#endif // MLIR_DIALECT_VCIX_VCIXDIALECT_H_
diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
index 19a62cadaa2e04..3dcb4a41e9d17c 100644
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -83,6 +83,7 @@
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
#include "mlir/Dialect/Transform/PDLExtension/PDLExtension.h"
#include "mlir/Dialect/UB/IR/UBOps.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.h"
@@ -137,6 +138,7 @@ inline void registerAllDialects(DialectRegistry ®istry) {
tosa::TosaDialect,
transform::TransformDialect,
ub::UBDialect,
+ vcix::VCIXDialect,
vector::VectorDialect,
x86vector::X86VectorDialect>();
// clang-format on
diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt
index 68776a695cac4d..c1e3bd6998ef87 100644
--- a/mlir/lib/Dialect/CMakeLists.txt
+++ b/mlir/lib/Dialect/CMakeLists.txt
@@ -37,6 +37,7 @@ add_subdirectory(Tosa)
add_subdirectory(Transform)
add_subdirectory(UB)
add_subdirectory(Utils)
+add_subdirectory(VCIX)
add_subdirectory(Vector)
add_subdirectory(X86Vector)
diff --git a/mlir/lib/Dialect/VCIX/CMakeLists.txt b/mlir/lib/Dialect/VCIX/CMakeLists.txt
new file mode 100644
index 00000000000000..f33061b2d87cff
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(IR)
diff --git a/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt b/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
new file mode 100644
index 00000000000000..063d76b04b884c
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
@@ -0,0 +1,16 @@
+add_mlir_dialect_library(MLIRVCIXDialect
+ VCIXDialect.cpp
+ VCIXAttrs.cpp
+ VCIXOps.cpp
+
+ ADDITIONAL_HEADER_DIRS
+ ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/VCIX
+
+ DEPENDS
+ MLIRVCIXIncGen
+
+ LINK_LIBS PUBLIC
+ MLIRIR
+ MLIRLLVMDialect
+ MLIRSideEffectInterfaces
+ )
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
new file mode 100644
index 00000000000000..d0562ae451ca11
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
@@ -0,0 +1,12 @@
+//===- VCIXAttrs.cpp - VCIX Dialect Attribute Definitions -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/IR/BuiltinTypes.h"
+
+#include "mlir/Dialect/VCIX/VCIXDialectEnums.cpp.inc"
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
new file mode 100644
index 00000000000000..aa9260e1dbf6bc
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
@@ -0,0 +1,28 @@
+//===- VCIXDialect.cpp - MLIR VCIX ops implementation ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the VCIX dialect and its operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/TypeUtilities.h"
+
+using namespace mlir;
+
+#include "mlir/Dialect/VCIX/VCIXDialect.cpp.inc"
+
+void vcix::VCIXDialect::initialize() {
+ addOperations<
+#define GET_OP_LIST
+#include "mlir/Dialect/VCIX/VCIX.cpp.inc"
+ >();
+}
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
new file mode 100644
index 00000000000000..7c1521d246b7d3
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
@@ -0,0 +1,174 @@
+//===- VCIXOps.cpp - VCIX dialect operations ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/Diagnostics.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/TypeUtilities.h"
+#include "mlir/IR/Verifier.h"
+
+using namespace mlir;
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/VCIX/VCIX.cpp.inc"
+
+static LogicalResult verifyOpcode(Attribute opcodeAttr,
+ const unsigned expectedBitSize) {
+ if (auto intAttr = opcodeAttr.dyn_cast<IntegerAttr>())
+ return LogicalResult::success(intAttr.getType().isInteger(expectedBitSize));
+ return failure();
+}
+
+static LogicalResult isWidenType(Type from, Type to) {
+ if (isa<IntegerType>(from)) {
+ return LogicalResult::success(2 * from.cast<IntegerType>().getWidth() ==
+ to.cast<IntegerType>().getWidth());
+ }
+ if (isa<FloatType>(from)) {
+ if (from.isF16() && to.isF32())
+ return success();
+ if (from.isF32() && to.isF64())
+ return success();
+ }
+ return failure();
+}
+
+// Return true if type is a scalable vector that encodes LMUL and SEW correctly
+// https://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html
+static LogicalResult verifyVectorType(Type t) {
+ auto vt = t.dyn_cast<VectorType>();
+ if (!vt || vt.getRank() != 1)
+ return failure();
+ if (!vt.isScalable())
+ return success();
+
+ Type eltTy = vt.getElementType();
+ unsigned sew = 0;
+ if (eltTy.isF32())
+ sew = 32;
+ else if (eltTy.isF64())
+ sew = 64;
+ else if (auto intTy = eltTy.dyn_cast<IntegerType>())
+ sew = intTy.getWidth();
+ else
+ return failure();
+
+ unsigned eltCount = vt.getShape()[0];
+ const unsigned lmul = eltCount * sew / 64;
+ return lmul > 8 ? failure() : success();
+}
+
+template <typename OpT>
+static LogicalResult verifyVCIXOpCommon(OpT op, Value result) {
+ Type op1Type = op.getOp1().getType();
+ VectorType op2Type = op.getOp2().getType().template cast<VectorType>();
+ if (result && op2Type != result.getType())
+ return op.emitOpError("Result type does not match to op2 type");
+
+ if (failed(verifyVectorType(op2Type)))
+ return op.emitOpError(
+ "used type does not represent RVV-compatible scalable vector type");
+
+ if (!op2Type.isScalable() && op.getRvl())
+ return op.emitOpError(
+ "'rvl' must not be specified if operation is done on a "
+ "fixed vector type");
+
+ if (op1Type.isa<VectorType>() && op1Type != op2Type)
+ return op.emitOpError("op1 type does not match to op2 type");
+
+ if (op1Type.isa<FloatType>()) {
+ if (failed(verifyOpcode(op.getOpcodeAttr(), 1)))
+ return op.emitOpError(
+ "with a floating point scalar can only use 1-bit opcode");
+ return success();
+ }
+ if (failed(verifyOpcode(op.getOpcodeAttr(), 2)))
+ return op.emitOpError("must use 2-bit opcode");
+
+ if (op1Type.isInteger(5)) {
+ Operation *defOp = op.getOp1().getDefiningOp();
+ if (!defOp || !defOp->hasTrait<OpTrait::ConstantLike>())
+ return op.emitOpError("immediate operand must be a constant");
+ return success();
+ }
+ if (op1Type.isa<IntegerType>() && !op1Type.isInteger(32) &&
+ !op1Type.isInteger(64))
+ return op.emitOpError(
+ "non-constant integer first operand must be of a size 32 or 64");
+ return success();
+}
+
+/// Unary operations
+LogicalResult vcix::UnaryROOp::verify() {
+ if (failed(verifyOpcode(getOpcodeAttr(), 2)))
+ return emitOpError("must use 2-bit opcode");
+ return success();
+}
+
+LogicalResult vcix::UnaryOp::verify() {
+ if (failed(verifyOpcode(getOpcodeAttr(), 2)))
+ return emitOpError("must use 2-bit opcode");
+
+ if (failed(verifyVectorType(getResult().getType())))
+ return emitOpError(
+ "result type does not represent RVV-compatible scalable vector type");
+
+ return success();
+}
+
+/// Binary operations
+LogicalResult vcix::BinaryROOp::verify() {
+ return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::BinaryOp::verify() {
+ return verifyVCIXOpCommon(*this, getResult());
+}
+
+/// Ternary operations
+LogicalResult vcix::TernaryROOp::verify() {
+ VectorType op2Type = getOp2().getType().cast<VectorType>();
+ VectorType op3Type = getOp3().getType().cast<VectorType>();
+ if (op2Type != op3Type) {
+ return emitOpError("op3 type does not match to op2 type");
+ }
+ return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::TernaryOp::verify() {
+ VectorType op2Type = getOp2().getType().cast<VectorType>();
+ VectorType op3Type = getOp3().getType().cast<VectorType>();
+ if (op2Type != op3Type)
+ return emitOpError("op3 type does not match to op2 type");
+
+ return verifyVCIXOpCommon(*this, getResult());
+}
+
+/// Wide Ternary operations
+LogicalResult vcix::WideTernaryROOp::verify() {
+ VectorType op2Type = getOp2().getType().cast<VectorType>();
+ VectorType op3Type = getOp3().getType().cast<VectorType>();
+ if (failed(isWidenType(op2Type.getElementType(), op3Type.getElementType())))
+ return emitOpError("result type is not widened type of op2");
+
+ return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::WideTernaryOp::verify() {
+ VectorType op2Type = getOp2().getType().cast<VectorType>();
+ VectorType op3Type = getOp3().getType().cast<VectorType>();
+ if (failed(isWidenType(op2Type.getElementType(), op3Type.getElementType())))
+ return emitOpError("result type is not widened type of op2");
+
+ // Don't compare result type for widened operations
+ return verifyVCIXOpCommon(*this, nullptr);
+}
diff --git a/mlir/test/Dialect/VCIX/invalid.mlir b/mlir/test/Dialect/VCIX/invalid.mlir
new file mode 100644
index 00000000000000..65c783dc852002
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/invalid.mlir
@@ -0,0 +1,57 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+// -----
+func.func @unary_e8mf2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  // expected-error@+1 {{must use 2-bit opcode}}
+  vcix.unary.ro e8mf2 %const, %rvl { opcode = 1 : i1, rs2 = 30 : i5, rd = 31 : i5 } : (i32, ui32)
+  return
+}
+// -----
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  // expected-error@+1 {{with a floating point scalar can only use 1-bit opcode}}
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  // expected-error@+1 {{with a floating point scalar can only use 1-bit opcode}}
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+  // expected-error@+1 {{with a floating point scalar can only use 1-bit opcode}}
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+// -----
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  // expected-error@+1 {{result type is not widened type of op2}}
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @binary_fv_wrong_vtype(%op1: f32, %op2 : vector<[32] x f32>, %rvl : ui32) -> vector<[32] x f32> {
+  // expected-error@+1 {{used type does not represent RVV-compatible scalable vector type}}
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[32] x f32>, ui32) -> vector<[32] x f32>
+  return %0 : vector<[32] x f32>
+}
+
+// -----
+func.func @binary_fv_vls_rvl(%op1: f32, %op2 : vector<4 x f32>, %rvl : ui32) -> vector<4 x f32> {
+  // expected-error@+1 {{'rvl' must not be specified if operation is done on a fixed vector type}}
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<4 x f32>, ui32) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @binary_nonconst(%val: i5, %op: vector<[4] x f32>, %rvl: ui32) {
+  // expected-error@+1 {{immediate operand must be a constant}}
+  vcix.binary.ro %val, %op, %rvl { opcode = 1 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui32)
+  return
+}
diff --git a/mlir/test/Dialect/VCIX/ops.mlir b/mlir/test/Dialect/VCIX/ops.mlir
new file mode 100644
index 00000000000000..c5392d67d50d50
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/ops.mlir
@@ -0,0 +1,531 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+// -----
+func.func @unary_ro_e8mf8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8mf4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8mf2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8m1(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8m2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8m4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8m8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+func.func @unary_e8mf8(%rvl: ui32) -> vector<[1] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i8>
+ return %0 : vector<[1] x i8>
+}
+
+func.func @unary_e8mf4(%rvl: ui32) -> vector<[2] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i8>
+ return %0 : vector<[2] x i8>
+}
+
+func.func @unary_e8mf2(%rvl: ui32) -> vector<[4] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i8>
+ return %0 : vector<[4] x i8>
+}
+
+func.func @unary_e8m1(%rvl: ui32) -> vector<[8] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i8>
+ return %0 : vector<[8] x i8>
+}
+
+func.func @unary_e8m2(%rvl: ui32) -> vector<[16] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i8>
+ return %0 : vector<[16] x i8>
+}
+
+func.func @unary_e8m4(%rvl: ui32) -> vector<[32] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i8>
+ return %0 : vector<[32] x i8>
+}
+
+func.func @unary_e8m8(%rvl: ui32) -> vector<[64] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[64] x i8>
+ return %0 : vector<[64] x i8>
+}
+
+// -----
+func.func @unary_ro_e16mf4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16mf2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16m1(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16m2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16m4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16m8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+func.func @unary_e16mf4(%rvl: ui32) -> vector<[1] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i16>
+ return %0 : vector<[1] x i16>
+}
+
+func.func @unary_e16mf2(%rvl: ui32) -> vector<[2] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i16>
+ return %0 : vector<[2] x i16>
+}
+
+func.func @unary_e16m1(%rvl: ui32) -> vector<[4] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i16>
+ return %0 : vector<[4] x i16>
+}
+
+func.func @unary_e16m2(%rvl: ui32) -> vector<[8] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i16>
+ return %0 : vector<[8] x i16>
+}
+
+func.func @unary_e16m4(%rvl: ui32) -> vector<[16] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i16>
+ return %0 : vector<[16] x i16>
+}
+
+func.func @unary_e16m8(%rvl: ui32) -> vector<[32] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i16>
+ return %0 : vector<[32] x i16>
+}
+
+// -----
+func.func @unary_ro_e32mf2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e32m1(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e32m2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e32m4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e32m8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+func.func @unary_e32mf2(%rvl: ui32) -> vector<[1] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i32>
+ return %0 : vector<[1] x i32>
+}
+
+func.func @unary_e32m1(%rvl: ui32) -> vector<[2] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i32>
+ return %0 : vector<[2] x i32>
+}
+
+func.func @unary_e32m2(%rvl: ui32) -> vector<[4] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i32>
+ return %0 : vector<[4] x i32>
+}
+
+func.func @unary_e32m4(%rvl: ui32) -> vector<[8] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i32>
+ return %0 : vector<[8] x i32>
+}
+
+func.func @unary_e32m8(%rvl: ui32) -> vector<[16] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i32>
+ return %0 : vector<[16] x i32>
+}
+
+// -----
+func.func @unary_ro_e64m1(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e64m2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e64m4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e64m8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+func.func @unary_e64m1(%rvl: ui32) -> vector<[1] x i64>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i64>
+ return %0 : vector<[1] x i64>
+}
+
+func.func @unary_e64m2(%rvl: ui32) -> vector<[2] x i64>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i64>
+ return %0 : vector<[2] x i64>
+}
+
+func.func @unary_e64m4(%rvl: ui32) -> vector<[4] x i64>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i64>
+ return %0 : vector<[4] x i64>
+}
+
+func.func @unary_e64m8(%rvl: ui32) -> vector<[8] x i64>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i64>
+ return %0 : vector<[8] x i64>
+}
+
+// -----
+func.func @binary_vv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @binary_vv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @binary_xv_ro(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @binary_xv(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @binary_fv_ro(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @binary_iv_ro(%op2 : vector<[4] x f32>, %rvl : ui32) {
+ %const = arith.constant 1 : i5
+ vcix.binary.ro %const, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @binary_iv(%op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.binary %const, %op2, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @ternary_vvv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @ternary_vvv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_xvv_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @ternary_xvv(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_fvv_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_ivv_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+ %const = arith.constant 1 : i5
+ vcix.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @wide_ternary_vvw_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %0: vector<[4] x f64>
+}
+
+func.func @wide_ternary_xvw_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+func.func @wide_ternary_xvw(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %0 : vector<[4] x f64>
+}
+
+func.func @wide_ternary_fvw_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  // Return the op's result (was %op3, leaving %0 dead) — matches the sibling tests.
+  return %0 : vector<[4] x f64>
+}
+
+func.func @wide_ternary_ivw_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+ %const = arith.constant 1 : i5
+ vcix.wide.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+func.func @wide_ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  // Return the op's result (was %op3, leaving %0 dead) — matches the sibling tests.
+  return %0 : vector<[4] x f64>
+}
+
+// -----
+func.func @fixed_binary_vv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) {
+ vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_binary_vv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_xv_ro(%op1: i32, %op2 : vector<4 x f32>) {
+ vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_binary_xv(%op1: i32, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (i32, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_fv_ro(%op1: f32, %op2 : vector<4 x f32>) {
+ vcix.binary.ro %op1, %op2 { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_binary_fv(%op1: f32, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.binary %op1, %op2 { opcode = 1 : i1 } : (f32, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_iv_ro(%op2 : vector<4 x f32>) {
+ %const = arith.constant 1 : i5
+ vcix.binary.ro %const, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_binary_iv(%op2 : vector<4 x f32>) -> vector<4 x f32> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.binary %const, %op2 { opcode = 3 : i2 } : (i5, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @fixed_ternary_vvv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_ternary_vvv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_xvv_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_ternary_xvv(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_fvv_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_ternary_fvv(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_ivv_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+ %const = arith.constant 1 : i5
+ vcix.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @fixed_wide_ternary_vvw_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+func.func @fixed_wide_ternary_vvw(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_xvw_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+func.func @fixed_wide_ternary_xvw(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_fvw_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+func.func @fixed_wide_ternary_fvw(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  // Return the op's result (was %op3, leaving %0 dead) — matches the sibling tests.
+  return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_ivw_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+ %const = arith.constant 1 : i5
+ vcix.wide.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+func.func @fixed_wide_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  // Return the op's result (was %op3, leaving %0 dead) — matches the sibling tests.
+  return %0 : vector<4 x f64>
+}
More information about the Mlir-commits
mailing list