[Mlir-commits] [mlir] [MLIR][RISCV] Add VCIX legalization to `VectorToLLVM` pass (PR #74781)
Kolya Panchenko
llvmlistbot at llvm.org
Thu Dec 7 15:25:53 PST 2023
https://github.com/nikolaypanchenko created https://github.com/llvm/llvm-project/pull/74781
The changeset continues the work on the VCIX dialect by adding the legalization step for it to the `VectorToLLVM` pass.
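For reference, the lowering is gated behind a new pass option, so it can be exercised roughly as follows (a minimal sketch; `input.mlir` is a placeholder, the pass and flag names come from the `Passes.td` change below):

  mlir-opt input.mlir -convert-vector-to-llvm="enable-vcix=true"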
>From 99f02eb6c0302ee591a8b2c5efbb638c610cb625 Mon Sep 17 00:00:00 2001
From: Kolya Panchenko <kolya.panchenko at sifive.com>
Date: Mon, 6 Nov 2023 09:40:17 -0800
Subject: [PATCH 1/2] [MLIR][RISCV] Add VCIX dialect
The changeset adds a new dialect called VCIX to support the VCIX intrinsics of the
XSfvcp extension, allowing MLIR users to interact with coprocessors that are
compatible with that extension.
Source: https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
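
For illustration, a VCIX binary operation on a scalable vector looks like this
(taken verbatim from the ops.mlir test added below):

  func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
    %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
    return %0 : vector<[4] x f32>
  }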
---
mlir/include/mlir/Conversion/Passes.td | 5 +-
mlir/include/mlir/Dialect/CMakeLists.txt | 1 +
mlir/include/mlir/Dialect/VCIX/CMakeLists.txt | 8 +
mlir/include/mlir/Dialect/VCIX/VCIX.td | 305 ++++++++++
mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h | 20 +
mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td | 91 +++
mlir/include/mlir/Dialect/VCIX/VCIXDialect.h | 28 +
mlir/include/mlir/InitAllDialects.h | 2 +
mlir/lib/Dialect/CMakeLists.txt | 1 +
mlir/lib/Dialect/VCIX/CMakeLists.txt | 1 +
mlir/lib/Dialect/VCIX/IR/CMakeLists.txt | 16 +
mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp | 12 +
mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp | 28 +
mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp | 174 ++++++
mlir/test/Dialect/VCIX/invalid.mlir | 57 ++
mlir/test/Dialect/VCIX/ops.mlir | 531 ++++++++++++++++++
16 files changed, 1279 insertions(+), 1 deletion(-)
create mode 100644 mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIX.td
create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
create mode 100644 mlir/lib/Dialect/VCIX/CMakeLists.txt
create mode 100644 mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
create mode 100644 mlir/test/Dialect/VCIX/invalid.mlir
create mode 100644 mlir/test/Dialect/VCIX/ops.mlir
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 06756ff3df0bb3..c03b0137e6e0f5 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -1294,6 +1294,10 @@ def ConvertVectorToLLVMPass : Pass<"convert-vector-to-llvm"> {
"bool", /*default=*/"false",
"Enables the use of ArmSVE dialect while lowering the vector "
"dialect.">,
+ Option<"vcix", "enable-vcix",
+ "bool", /*default=*/"false",
+ "Enables the use of VCIX dialect while lowering the vector "
+             "dialect to the RISC-V target">,
Option<"x86Vector", "enable-x86vector",
"bool", /*default=*/"false",
"Enables the use of X86Vector dialect while lowering the vector "
@@ -1310,5 +1314,4 @@ def ConvertVectorToSPIRV : Pass<"convert-vector-to-spirv"> {
let constructor = "mlir::createConvertVectorToSPIRVPass()";
let dependentDialects = ["spirv::SPIRVDialect"];
}
-
#endif // MLIR_CONVERSION_PASSES
diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt
index 1c4569ecfa5848..1408ced218dbb2 100644
--- a/mlir/include/mlir/Dialect/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/CMakeLists.txt
@@ -37,5 +37,6 @@ add_subdirectory(Tosa)
add_subdirectory(Transform)
add_subdirectory(UB)
add_subdirectory(Utils)
+add_subdirectory(VCIX)
add_subdirectory(Vector)
add_subdirectory(X86Vector)
diff --git a/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt b/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
new file mode 100644
index 00000000000000..2ed490283b3519
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
@@ -0,0 +1,8 @@
+add_mlir_dialect(VCIX vcix)
+add_mlir_doc(VCIXOps VCIXOps Dialects/ -gen-dialect-doc -dialect=vcix)
+
+set(LLVM_TARGET_DEFINITIONS VCIXAttrs.td)
+mlir_tablegen(VCIXDialectEnums.h.inc -gen-enum-decls)
+mlir_tablegen(VCIXDialectEnums.cpp.inc -gen-enum-defs)
+add_public_tablegen_target(MLIRVCIXDialectEnumIncGen)
+add_dependencies(mlir-headers MLIRVCIXDialectEnumIncGen)
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIX.td b/mlir/include/mlir/Dialect/VCIX/VCIX.td
new file mode 100644
index 00000000000000..2b5bbeb22e97fc
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIX.td
@@ -0,0 +1,305 @@
+//===-- VCIX.td - VCIX dialect operation definitions -------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// The file defines the basic operations for the VCIX dialect.
+//
+// The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+// to extend application processors with custom coprocessors and
+// variable-latency arithmetic units. The interface offers throughput comparable
+// to that of standard RISC-V vector instructions. To accelerate performance,
+// system designers may use VCIX as a low-latency, high-throughput interface to
+// a coprocessor.
+//
+// https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+//
+//===----------------------------------------------------------------------===//
+#ifndef VCIX
+#define VCIX
+
+include "mlir/IR/EnumAttr.td"
+include "mlir/IR/OpBase.td"
+include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+
+include "mlir/Dialect/VCIX/VCIXAttrs.td"
+
+//===----------------------------------------------------------------------===//
+// VCIX dialect definition.
+//===----------------------------------------------------------------------===//
+
+def VCIX_Dialect : Dialect {
+ let name = "vcix";
+ let cppNamespace = "::mlir::vcix";
+ let description = [{
+ The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+ to extend application processors with custom coprocessors and
+ variable-latency arithmetic units. The interface offers throughput comparable
+ to that of standard RISC-V vector instructions. To accelerate performance,
+ system designers may use VCIX as a low-latency, high-throughput interface to
+    a coprocessor.
+
+ https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+ }];
+
+ let usePropertiesForAttributes = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// VCIX Ops
+//===----------------------------------------------------------------------===//
+class VCIX_Op<string mnemonic, list<Trait> traits = []>
+ : Op<VCIX_Dialect, mnemonic, traits> {}
+
+class VCIX_IntrinOp<string mnemonic, list<Trait> traits = []>
+ : LLVM_OpBase<VCIX_Dialect, "intrin." #mnemonic, traits> {}
+
+//===----------------------------------------------------------------------===//
+// Unary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_UnaryROOp : VCIX_Op<"unary.ro", []> {
+  let summary = "Unary VCIX operation with side effects and without a result";
+  let description = [{
+    Unary VCIX operation that has side effects and does not produce a result.
+
+    Corresponds to:
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.x 0000-- 1 ----- xs1 100 ----- none scalar xs1
+ sf.vc.i 0000-- 1 ----- simm 011 ----- none simm[4:0]
+ ```
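+
+    Example (taken from the ops.mlir test added in this patch):
+    ```
+    vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+    ```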
+ }];
+
+ let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+ RVLType: $rvl,
+ OpcodeIntAttr: $opcode,
+ VCIX_SewLmulAttr: $sew_lmul,
+ RAttr: $rs2,
+ RAttr: $rd);
+
+ let assemblyFormat = [{
+ $sew_lmul $op `,` $rvl attr-dict `:` `(` type($op) `,` type($rvl) `)`
+ }];
+
+ let hasVerifier = 1;
+}
+
+def VCIX_UnaryOp : VCIX_Op<"unary", []> {
+  let summary = "Unary VCIX operation";
+  let description = [{
+    Unary VCIX operation that produces a result.
+
+    Corresponds to:
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.x 0000-- 0 ----- xs1 100 vd vector vd scalar xs1
+ sf.vc.v.i 0000-- 0 ----- simm 011 vd vector vd simm[4:0]
+ ```
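+
+    Example (taken from the ops.mlir test added in this patch):
+    ```
+    %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5 } : (i32, ui32) -> vector<[1] x i8>
+    ```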
+ }];
+
+ let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode,
+ RAttr: $rs2);
+
+ let results = (outs VectorOfRank1: $result);
+
+ let assemblyFormat = [{
+ $op (`,` $rvl^)? attr-dict
+ `:` `(` type($op) (`,` type($rvl)^)? `)` `->` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Binary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_BinaryROOp : VCIX_Op<"binary.ro", []> {
+ let summary = "Read-only binary VCIX operation";
+ let description = [{
+    Read-only binary VCIX operation that does not produce a result.
+
+    Corresponds to:
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.vv 0010-- 1 vs2 vs1 000 ----- none vector vs1, vector vs2
+ sf.vc.v.xv 0010-- 1 vs2 xs1 100 ----- none scalar xs1, vector vs2
+ sf.vc.v.iv 0010-- 1 vs2 simm 011 ----- none simm[4:0], vector vs2
+ sf.vc.v.fv 0010-- 1 vs2 fs1 101 ----- none scalar fs1, vector vs2
+ ```
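+
+    Example (taken from the ops.mlir test added in this patch):
+    ```
+    vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui32)
+    ```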
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode,
+ RAttr: $rd);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) (`,` type($rvl)^)? `)`
+ }];
+
+ let hasVerifier = 1;
+}
+
+def VCIX_BinaryOp : VCIX_Op<"binary", []> {
+  let summary = "Binary VCIX operation";
+  let description = [{
+    Binary VCIX operation that produces a result.
+
+    Corresponds to:
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.vv 0010-- 0 vs2 vs1 000 vd vector vd vector vs1, vector vs2
+ sf.vc.v.xv 0010-- 0 vs2 xs1 100 vd vector vd scalar xs1, vector vs2
+ sf.vc.v.iv 0010-- 0 vs2 simm 011 vd vector vd simm[4:0], vector vs2
+ sf.vc.v.fv 0010-- 0 vs2 fs1 101 vd vector vd scalar fs1, vector vs2
+ ```
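+
+    Example (taken from the ops.mlir test added in this patch):
+    ```
+    %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+    ```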
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let results = (outs VectorOfRank1: $result);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) (`,` type($rvl)^)? `)` `->` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Ternary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_TernaryROOp : VCIX_Op<"ternary.ro", []> {
+  let summary = "Read-only ternary VCIX operation";
+  let description = [{
+    Read-only ternary VCIX operation that does not produce a result.
+
+    Corresponds to:
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.vvv 1010-- 1 vs2 vs1 000 vd none vector vs1, vector vs2, vector vd
+ sf.vc.xvv 1010-- 1 vs2 xs1 100 vd none scalar xs1, vector vs2, vector vd
+ sf.vc.ivv 1010-- 1 vs2 simm 011 vd none simm[4:0], vector vs2, vector vd
+ sf.vc.fvv 10101- 1 vs2 fs1 101 vd none scalar fs1, vector vs2, vector vd
+ ```
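+
+    Example (taken from the ops.mlir test added in this patch):
+    ```
+    vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+    ```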
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)`
+ }];
+
+ let hasVerifier = 1;
+}
+
+def VCIX_TernaryOp : VCIX_Op<"ternary", []> {
+ let summary = "Ternary VCIX operation";
+ let description = [{
+    Ternary VCIX operation that produces a result.
+
+    Corresponds to:
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.vvv 1010-- 0 vs2 vs1 000 vd vector vd vector vs1, vector vs2, vector vd
+ sf.vc.v.xvv 1010-- 0 vs2 xs1 100 vd vector vd scalar xs1, vector vs2, vector vd
+ sf.vc.v.ivv 1010-- 0 vs2 simm 011 vd vector vd simm[4:0], vector vs2, vector vd
+ sf.vc.v.fvv 10101- 0 vs2 fs1 101 vd vector vd scalar fs1, vector vs2, vector vd
+ ```
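+
+    Example (taken from the ops.mlir test added in this patch):
+    ```
+    %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+    ```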
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let results = (outs VectorOfRank1: $result);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)` `->` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Wide ternary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_WideTernaryROOp : VCIX_Op<"wide.ternary.ro", []> {
+  let summary = "Read-only wide ternary VCIX operation";
+  let description = [{
+    Wide ternary VCIX operation that does not produce a result.
+
+    Corresponds to:
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.vvw 1111-- 1 vs2 vs1 000 vd none vector vs1, vector vs2, wide vd
+ sf.vc.xvw 1111-- 1 vs2 xs1 100 vd none scalar xs1, vector vs2, wide vd
+ sf.vc.ivw 1111-- 1 vs2 simm 011 vd none simm[4:0], vector vs2, wide vd
+ sf.vc.fvw 11111- 1 vs2 fs1 101 vd none scalar fs1, vector vs2, wide vd
+ ```
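+
+    Example (taken from the ops.mlir test added in this patch):
+    ```
+    vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+    ```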
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)`
+ }];
+
+ let hasVerifier = 1;
+}
+
+def VCIX_WideTernaryOp : VCIX_Op<"wide.ternary", []> {
+  let summary = "Wide ternary VCIX operation";
+  let description = [{
+    Wide ternary VCIX operation that produces a result.
+
+    Corresponds to:
+ ```
+ Mnemonic funct6 vm rs2 rs1 funct3 rd Destination Sources
+ sf.vc.v.vvw 1111-- 0 vs2 vs1 000 vd wide vd vector vs1, vector vs2, wide vd
+ sf.vc.v.xvw 1111-- 0 vs2 xs1 100 vd wide vd scalar xs1, vector vs2, wide vd
+ sf.vc.v.ivw 1111-- 0 vs2 simm 011 vd wide vd simm[4:0], vector vs2, wide vd
+ sf.vc.v.fvw 11111- 0 vs2 fs1 101 vd wide vd scalar fs1, vector vs2, wide vd
+ ```
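+
+    Example (taken from the ops.mlir test added in this patch):
+    ```
+    %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+    ```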
+ }];
+
+ let arguments = (ins VectorOfRank1OrScalar: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLType>: $rvl,
+ OpcodeIntAttr: $opcode);
+
+ let results = (outs VectorOfRank1: $result);
+
+ let assemblyFormat = [{
+ $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+ `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)` `->` type($result)
+ }];
+
+ let hasVerifier = 1;
+}
+#endif // VCIX
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
new file mode 100644
index 00000000000000..95b66ee2b9a924
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
@@ -0,0 +1,20 @@
+//===- VCIXAttrs.h - VCIX Dialect Attribute Definitions --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VCIX_VCIXATTRS_H
+#define MLIR_DIALECT_VCIX_VCIXATTRS_H
+
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BuiltinAttributes.h"
+
+#include <cstdint>
+#include <optional>
+
+#include "mlir/Dialect/VCIX/VCIXDialectEnums.h.inc"
+
+#endif // MLIR_DIALECT_VCIX_VCIXATTRS_H
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
new file mode 100644
index 00000000000000..7f3ecc9a3f2d3c
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
@@ -0,0 +1,91 @@
+//===- VCIXAttrs.td - VCIX dialect attributes --------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef MLIR_DIALECT_VCIX_VCIXATTRS
+#define MLIR_DIALECT_VCIX_VCIXATTRS
+
+include "mlir/IR/EnumAttr.td"
+
+//===----------------------------------------------------------------------===//
+// VCIX helper type definitions
+//===----------------------------------------------------------------------===//
+def VectorOfRank1 : AnyTypeOf<[ScalableVectorOfRank<[1]>, VectorOfRank<[1]>]>;
+def VectorOfRank1OrScalar
+ : AnyTypeOf<[VectorOfRank1, I<64>, I<32>, F<16>, F<32>, F<64>, I<5>]>;
+def OpcodeI1Attr : AnyIntegerAttrBase<AnyI<1>, "1-bit integer attribute">;
+def OpcodeI2Attr : AnyIntegerAttrBase<AnyI<2>, "2-bit integer attribute">;
+def OpcodeIntAttr : AnyAttrOf<[OpcodeI1Attr, OpcodeI2Attr]>;
+def RAttr : AnyAttrOf<[AnyIntegerAttrBase<AnyI<5>, "5-bit integer attribute">]>;
+def RVLType : AnyTypeOf<[UI<64>, UI<32>]>;
+
+// Special versions for the intrinsic ops, where the integer attribute is
+// zero-extended to i32 or i64 depending on the xlen of the target.
+def VectorOfRank1OrScalarIntrin
+ : AnyTypeOf<[VectorOfRank1, I<64>, I<32>, F<16>, F<32>, F<64>]>;
+def OpcodeIntrinIntAttr : AnyAttrOf<[I64Attr, I32Attr]>;
+def RIntrinAttr : AnyAttrOf<[I64Attr, I32Attr]>;
+def RVLIntrinType : AnyTypeOf<[I<64>, I<32>]>;
+
+def VCIX_e8mf8 : I32EnumAttrCase<"e8mf8", 0, "e8mf8">;
+def VCIX_e8mf4 : I32EnumAttrCase<"e8mf4", 1, "e8mf4">;
+def VCIX_e8mf2 : I32EnumAttrCase<"e8mf2", 2, "e8mf2">;
+def VCIX_e8m1 : I32EnumAttrCase<"e8m1", 3, "e8m1">;
+def VCIX_e8m2 : I32EnumAttrCase<"e8m2", 4, "e8m2">;
+def VCIX_e8m4 : I32EnumAttrCase<"e8m4", 5, "e8m4">;
+def VCIX_e8m8 : I32EnumAttrCase<"e8m8", 6, "e8m8">;
+
+def VCIX_e16mf4 : I32EnumAttrCase<"e16mf4", 7, "e16mf4">;
+def VCIX_e16mf2 : I32EnumAttrCase<"e16mf2", 8, "e16mf2">;
+def VCIX_e16m1 : I32EnumAttrCase<"e16m1", 9, "e16m1">;
+def VCIX_e16m2 : I32EnumAttrCase<"e16m2", 10, "e16m2">;
+def VCIX_e16m4 : I32EnumAttrCase<"e16m4", 11, "e16m4">;
+def VCIX_e16m8 : I32EnumAttrCase<"e16m8", 12, "e16m8">;
+
+def VCIX_e32mf2 : I32EnumAttrCase<"e32mf2", 13, "e32mf2">;
+def VCIX_e32m1 : I32EnumAttrCase<"e32m1", 14, "e32m1">;
+def VCIX_e32m2 : I32EnumAttrCase<"e32m2", 15, "e32m2">;
+def VCIX_e32m4 : I32EnumAttrCase<"e32m4", 16, "e32m4">;
+def VCIX_e32m8 : I32EnumAttrCase<"e32m8", 17, "e32m8">;
+
+def VCIX_e64m1 : I32EnumAttrCase<"e64m1", 18, "e64m1">;
+def VCIX_e64m2 : I32EnumAttrCase<"e64m2", 19, "e64m2">;
+def VCIX_e64m4 : I32EnumAttrCase<"e64m4", 20, "e64m4">;
+def VCIX_e64m8 : I32EnumAttrCase<"e64m8", 21, "e64m8">;
+
+def VCIX_SewLmulAttr : I32EnumAttr<"SewLmul",
+    "A list of all possible SEW and LMUL combinations",
+ [
+ VCIX_e8mf8,
+ VCIX_e8mf4,
+ VCIX_e8mf2,
+ VCIX_e8m1,
+ VCIX_e8m2,
+ VCIX_e8m4,
+ VCIX_e8m8,
+
+ VCIX_e16mf4,
+ VCIX_e16mf2,
+ VCIX_e16m1,
+ VCIX_e16m2,
+ VCIX_e16m4,
+ VCIX_e16m8,
+
+ VCIX_e32mf2,
+ VCIX_e32m1,
+ VCIX_e32m2,
+ VCIX_e32m4,
+ VCIX_e32m8,
+
+ VCIX_e64m1,
+ VCIX_e64m2,
+ VCIX_e64m4,
+ VCIX_e64m8,
+ ]> {
+ let cppNamespace = "::mlir::vcix";
+}
+
+#endif // MLIR_DIALECT_VCIX_VCIXATTRS
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h b/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
new file mode 100644
index 00000000000000..0e795f42f58dee
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
@@ -0,0 +1,28 @@
+//===- VCIXDialect.h - MLIR Dialect for VCIX --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the VCIX dialect in MLIR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VCIX_VCIXDIALECT_H_
+#define MLIR_DIALECT_VCIX_VCIXDIALECT_H_
+
+#include "mlir/Bytecode/BytecodeOpInterface.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Dialect.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
+
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h.inc"
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/VCIX/VCIX.h.inc"
+
+#endif // MLIR_DIALECT_VCIX_VCIXDIALECT_H_
diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
index 19a62cadaa2e04..3dcb4a41e9d17c 100644
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -83,6 +83,7 @@
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
#include "mlir/Dialect/Transform/PDLExtension/PDLExtension.h"
#include "mlir/Dialect/UB/IR/UBOps.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.h"
@@ -137,6 +138,7 @@ inline void registerAllDialects(DialectRegistry &registry) {
tosa::TosaDialect,
transform::TransformDialect,
ub::UBDialect,
+ vcix::VCIXDialect,
vector::VectorDialect,
x86vector::X86VectorDialect>();
// clang-format on
diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt
index 68776a695cac4d..c1e3bd6998ef87 100644
--- a/mlir/lib/Dialect/CMakeLists.txt
+++ b/mlir/lib/Dialect/CMakeLists.txt
@@ -37,6 +37,7 @@ add_subdirectory(Tosa)
add_subdirectory(Transform)
add_subdirectory(UB)
add_subdirectory(Utils)
+add_subdirectory(VCIX)
add_subdirectory(Vector)
add_subdirectory(X86Vector)
diff --git a/mlir/lib/Dialect/VCIX/CMakeLists.txt b/mlir/lib/Dialect/VCIX/CMakeLists.txt
new file mode 100644
index 00000000000000..f33061b2d87cff
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(IR)
diff --git a/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt b/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
new file mode 100644
index 00000000000000..063d76b04b884c
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
@@ -0,0 +1,16 @@
+add_mlir_dialect_library(MLIRVCIXDialect
+ VCIXDialect.cpp
+ VCIXAttrs.cpp
+ VCIXOps.cpp
+
+ ADDITIONAL_HEADER_DIRS
+ ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/VCIX
+
+ DEPENDS
+ MLIRVCIXIncGen
+
+ LINK_LIBS PUBLIC
+ MLIRIR
+ MLIRLLVMDialect
+ MLIRSideEffectInterfaces
+ )
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
new file mode 100644
index 00000000000000..d0562ae451ca11
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
@@ -0,0 +1,12 @@
+//===- VCIXAttrs.cpp - VCIX Dialect Attribute Definitions -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/IR/BuiltinTypes.h"
+
+#include "mlir/Dialect/VCIX/VCIXDialectEnums.cpp.inc"
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
new file mode 100644
index 00000000000000..aa9260e1dbf6bc
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
@@ -0,0 +1,28 @@
+//===- VCIXDialect.cpp - MLIR VCIX ops implementation ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the VCIX dialect and its operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/TypeUtilities.h"
+
+using namespace mlir;
+
+#include "mlir/Dialect/VCIX/VCIXDialect.cpp.inc"
+
+void vcix::VCIXDialect::initialize() {
+ addOperations<
+#define GET_OP_LIST
+#include "mlir/Dialect/VCIX/VCIX.cpp.inc"
+ >();
+}
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
new file mode 100644
index 00000000000000..7c1521d246b7d3
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
@@ -0,0 +1,174 @@
+//===- VCIXOps.cpp - VCIX dialect operations ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/Diagnostics.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/TypeUtilities.h"
+#include "mlir/IR/Verifier.h"
+
+using namespace mlir;
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/VCIX/VCIX.cpp.inc"
+
+static LogicalResult verifyOpcode(Attribute opcodeAttr,
+ const unsigned expectedBitSize) {
+ if (auto intAttr = opcodeAttr.dyn_cast<IntegerAttr>())
+ return LogicalResult::success(intAttr.getType().isInteger(expectedBitSize));
+ return failure();
+}
+
+static LogicalResult isWidenType(Type from, Type to) {
+  if (isa<IntegerType>(from) && isa<IntegerType>(to)) {
+ return LogicalResult::success(2 * from.cast<IntegerType>().getWidth() ==
+ to.cast<IntegerType>().getWidth());
+ }
+ if (isa<FloatType>(from)) {
+ if (from.isF16() && to.isF32())
+ return success();
+ if (from.isF32() && to.isF64())
+ return success();
+ }
+ return failure();
+}
+
+// Return success if the type is a rank-1 vector. For scalable vectors, also
+// check that the type encodes SEW and LMUL correctly, see
+// https://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html
+static LogicalResult verifyVectorType(Type t) {
+ auto vt = t.dyn_cast<VectorType>();
+ if (!vt || vt.getRank() != 1)
+ return failure();
+ if (!vt.isScalable())
+ return success();
+
+ Type eltTy = vt.getElementType();
+ unsigned sew = 0;
+ if (eltTy.isF32())
+ sew = 32;
+ else if (eltTy.isF64())
+ sew = 64;
+ else if (auto intTy = eltTy.dyn_cast<IntegerType>())
+ sew = intTy.getWidth();
+ else
+ return failure();
+
+ unsigned eltCount = vt.getShape()[0];
+ const unsigned lmul = eltCount * sew / 64;
+ return lmul > 8 ? failure() : success();
+}
+
+template <typename OpT>
+static LogicalResult verifyVCIXOpCommon(OpT op, Value result) {
+ Type op1Type = op.getOp1().getType();
+ VectorType op2Type = op.getOp2().getType().template cast<VectorType>();
+ if (result && op2Type != result.getType())
+    return op.emitOpError("result type does not match op2 type");
+
+ if (failed(verifyVectorType(op2Type)))
+ return op.emitOpError(
+ "used type does not represent RVV-compatible scalable vector type");
+
+ if (!op2Type.isScalable() && op.getRvl())
+ return op.emitOpError(
+ "'rvl' must not be specified if operation is done on a "
+ "fixed vector type");
+
+ if (op1Type.isa<VectorType>() && op1Type != op2Type)
+    return op.emitOpError("op1 type does not match op2 type");
+
+ if (op1Type.isa<FloatType>()) {
+ if (failed(verifyOpcode(op.getOpcodeAttr(), 1)))
+ return op.emitOpError(
+ "with a floating point scalar can only use 1-bit opcode");
+ return success();
+ }
+ if (failed(verifyOpcode(op.getOpcodeAttr(), 2)))
+ return op.emitOpError("must use 2-bit opcode");
+
+ if (op1Type.isInteger(5)) {
+ Operation *defOp = op.getOp1().getDefiningOp();
+ if (!defOp || !defOp->hasTrait<OpTrait::ConstantLike>())
+ return op.emitOpError("immediate operand must be a constant");
+ return success();
+ }
+ if (op1Type.isa<IntegerType>() && !op1Type.isInteger(32) &&
+ !op1Type.isInteger(64))
+ return op.emitOpError(
+        "non-constant integer first operand must be 32 or 64 bits wide");
+ return success();
+}
+
+/// Unary operations
+LogicalResult vcix::UnaryROOp::verify() {
+ if (failed(verifyOpcode(getOpcodeAttr(), 2)))
+ return emitOpError("must use 2-bit opcode");
+ return success();
+}
+
+LogicalResult vcix::UnaryOp::verify() {
+ if (failed(verifyOpcode(getOpcodeAttr(), 2)))
+ return emitOpError("must use 2-bit opcode");
+
+ if (failed(verifyVectorType(getResult().getType())))
+ return emitOpError(
+ "result type does not represent RVV-compatible scalable vector type");
+
+ return success();
+}
+
+/// Binary operations
+LogicalResult vcix::BinaryROOp::verify() {
+ return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::BinaryOp::verify() {
+ return verifyVCIXOpCommon(*this, getResult());
+}
+
+/// Ternary operations
+LogicalResult vcix::TernaryROOp::verify() {
+ VectorType op2Type = getOp2().getType().cast<VectorType>();
+ VectorType op3Type = getOp3().getType().cast<VectorType>();
+ if (op2Type != op3Type) {
+    return emitOpError("op3 type does not match op2 type");
+ }
+ return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::TernaryOp::verify() {
+ VectorType op2Type = getOp2().getType().cast<VectorType>();
+ VectorType op3Type = getOp3().getType().cast<VectorType>();
+ if (op2Type != op3Type)
+    return emitOpError("op3 type does not match op2 type");
+
+ return verifyVCIXOpCommon(*this, getResult());
+}
+
+/// Wide Ternary operations
+LogicalResult vcix::WideTernaryROOp::verify() {
+ VectorType op2Type = getOp2().getType().cast<VectorType>();
+ VectorType op3Type = getOp3().getType().cast<VectorType>();
+ if (failed(isWidenType(op2Type.getElementType(), op3Type.getElementType())))
+    return emitOpError("op3 type is not a widened version of op2 type");
+
+ return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::WideTernaryOp::verify() {
+ VectorType op2Type = getOp2().getType().cast<VectorType>();
+ VectorType op3Type = getOp3().getType().cast<VectorType>();
+ if (failed(isWidenType(op2Type.getElementType(), op3Type.getElementType())))
+ return emitOpError("result type is not widened type of op2");
+
+  // Don't compare the result type for widened operations.
+ return verifyVCIXOpCommon(*this, nullptr);
+}
diff --git a/mlir/test/Dialect/VCIX/invalid.mlir b/mlir/test/Dialect/VCIX/invalid.mlir
new file mode 100644
index 00000000000000..65c783dc852002
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/invalid.mlir
@@ -0,0 +1,57 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+// -----
+func.func @unary_e8mf2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ // expected-error at +1 {{must use 2-bit opcode}}
+ vcix.unary.ro e8mf2 %const, %rvl { opcode = 1 : i1, rs2 = 30 : i5, rd = 31 : i5 } : (i32, ui32)
+ return
+}
+// -----
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ // expected-error at +1 {{with a floating point scalar can only use 1-bit opcode}}
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ // expected-error at +1 {{with a floating point scalar can only use 1-bit opcode}}
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+ // expected-error at +1 {{with a floating point scalar can only use 1-bit opcode}}
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %0 : vector<[4] x f64>
+}
+
+// -----
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ // expected-error at +1 {{result type is not widened type of op2}}
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @binary_fv_wrong_vtype(%op1: f32, %op2 : vector<[32] x f32>, %rvl : ui32) -> vector<[32] x f32> {
+ // expected-error at +1 {{used type does not represent RVV-compatible scalable vector type}}
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[32] x f32>, ui32) -> vector<[32] x f32>
+ return %0 : vector<[32] x f32>
+}
+
+// -----
+func.func @binary_fv_vls_rvl(%op1: f32, %op2 : vector<4 x f32>, %rvl : ui32) -> vector<4 x f32> {
+ // expected-error at +1 {{'rvl' must not be specified if operation is done on a fixed vector type}}
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<4 x f32>, ui32) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @binary_nonconst(%val: i5, %op: vector<[4] x f32>, %rvl: ui32) {
+ // expected-error at +1 {{immediate operand must be a constant}}
+ vcix.binary.ro %val, %op, %rvl { opcode = 1 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui32)
+ return
+}
diff --git a/mlir/test/Dialect/VCIX/ops.mlir b/mlir/test/Dialect/VCIX/ops.mlir
new file mode 100644
index 00000000000000..c5392d67d50d50
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/ops.mlir
@@ -0,0 +1,531 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+// -----
+func.func @unary_ro_e8mf8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8mf4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8mf2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8m1(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8m2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8m4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e8m8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+func.func @unary_e8mf8(%rvl: ui32) -> vector<[1] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i8>
+ return %0 : vector<[1] x i8>
+}
+
+func.func @unary_e8mf4(%rvl: ui32) -> vector<[2] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i8>
+ return %0 : vector<[2] x i8>
+}
+
+func.func @unary_e8mf2(%rvl: ui32) -> vector<[4] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i8>
+ return %0 : vector<[4] x i8>
+}
+
+func.func @unary_e8m1(%rvl: ui32) -> vector<[8] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i8>
+ return %0 : vector<[8] x i8>
+}
+
+func.func @unary_e8m2(%rvl: ui32) -> vector<[16] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i8>
+ return %0 : vector<[16] x i8>
+}
+
+func.func @unary_e8m4(%rvl: ui32) -> vector<[32] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i8>
+ return %0 : vector<[32] x i8>
+}
+
+func.func @unary_e8m8(%rvl: ui32) -> vector<[64] x i8>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[64] x i8>
+ return %0 : vector<[64] x i8>
+}
+
+// -----
+func.func @unary_ro_e16mf4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16mf2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16m1(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16m2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16m4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e16m8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+func.func @unary_e16mf4(%rvl: ui32) -> vector<[1] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i16>
+ return %0 : vector<[1] x i16>
+}
+
+func.func @unary_e16mf2(%rvl: ui32) -> vector<[2] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i16>
+ return %0 : vector<[2] x i16>
+}
+
+func.func @unary_e16m1(%rvl: ui32) -> vector<[4] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i16>
+ return %0 : vector<[4] x i16>
+}
+
+func.func @unary_e16m2(%rvl: ui32) -> vector<[8] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i16>
+ return %0 : vector<[8] x i16>
+}
+
+func.func @unary_e16m4(%rvl: ui32) -> vector<[16] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i16>
+ return %0 : vector<[16] x i16>
+}
+
+func.func @unary_e16m8(%rvl: ui32) -> vector<[32] x i16>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i16>
+ return %0 : vector<[32] x i16>
+}
+
+// -----
+func.func @unary_ro_e32mf2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e32m1(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e32m2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e32m4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e32m8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+func.func @unary_e32mf2(%rvl: ui32) -> vector<[1] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i32>
+ return %0 : vector<[1] x i32>
+}
+
+func.func @unary_e32m1(%rvl: ui32) -> vector<[2] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i32>
+ return %0 : vector<[2] x i32>
+}
+
+func.func @unary_e32m2(%rvl: ui32) -> vector<[4] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i32>
+ return %0 : vector<[4] x i32>
+}
+
+func.func @unary_e32m4(%rvl: ui32) -> vector<[8] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i32>
+ return %0 : vector<[8] x i32>
+}
+
+func.func @unary_e32m8(%rvl: ui32) -> vector<[16] x i32>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i32>
+ return %0 : vector<[16] x i32>
+}
+
+// -----
+func.func @unary_ro_e64m1(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e64m2(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e64m4(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+func.func @unary_ro_e64m8(%rvl: ui32) {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+func.func @unary_e64m1(%rvl: ui32) -> vector<[1] x i64>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i64>
+ return %0 : vector<[1] x i64>
+}
+
+func.func @unary_e64m2(%rvl: ui32) -> vector<[2] x i64>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i64>
+ return %0 : vector<[2] x i64>
+}
+
+func.func @unary_e64m4(%rvl: ui32) -> vector<[4] x i64>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i64>
+ return %0 : vector<[4] x i64>
+}
+
+func.func @unary_e64m8(%rvl: ui32) -> vector<[8] x i64>{
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i64>
+ return %0 : vector<[8] x i64>
+}
+
+// -----
+func.func @binary_vv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @binary_vv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @binary_xv_ro(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @binary_xv(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @binary_fv_ro(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @binary_iv_ro(%op2 : vector<[4] x f32>, %rvl : ui32) {
+ %const = arith.constant 1 : i5
+ vcix.binary.ro %const, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @binary_iv(%op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.binary %const, %op2, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @ternary_vvv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @ternary_vvv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_xvv_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @ternary_xvv(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_fvv_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_ivv_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+ %const = arith.constant 1 : i5
+ vcix.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+func.func @ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @wide_ternary_vvw_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %0: vector<[4] x f64>
+}
+
+func.func @wide_ternary_xvw_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+func.func @wide_ternary_xvw(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %0 : vector<[4] x f64>
+}
+
+func.func @wide_ternary_fvw_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+func.func @wide_ternary_ivw_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+ %const = arith.constant 1 : i5
+ vcix.wide.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+func.func @wide_ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.wide.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+// -----
+func.func @fixed_binary_vv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) {
+ vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_binary_vv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_xv_ro(%op1: i32, %op2 : vector<4 x f32>) {
+ vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_binary_xv(%op1: i32, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (i32, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_fv_ro(%op1: f32, %op2 : vector<4 x f32>) {
+ vcix.binary.ro %op1, %op2 { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_binary_fv(%op1: f32, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.binary %op1, %op2 { opcode = 1 : i1 } : (f32, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_iv_ro(%op2 : vector<4 x f32>) {
+ %const = arith.constant 1 : i5
+ vcix.binary.ro %const, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_binary_iv(%op2 : vector<4 x f32>) -> vector<4 x f32> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.binary %const, %op2 { opcode = 3 : i2 } : (i5, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @fixed_ternary_vvv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_ternary_vvv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_xvv_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_ternary_xvv(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_fvv_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_ternary_fvv(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_ivv_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+ %const = arith.constant 1 : i5
+ vcix.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+func.func @fixed_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @fixed_wide_ternary_vvw_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+func.func @fixed_wide_ternary_vvw(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_xvw_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+func.func @fixed_wide_ternary_xvw(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_fvw_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+func.func @fixed_wide_ternary_fvw(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_ivw_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+ %const = arith.constant 1 : i5
+ vcix.wide.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+func.func @fixed_wide_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+ %const = arith.constant 1 : i5
+ %0 = vcix.wide.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
>From 5a5a808e0938ac46649a45ffc015310aa4d0f12e Mon Sep 17 00:00:00 2001
From: Kolya Panchenko <kolya.panchenko at sifive.com>
Date: Thu, 7 Dec 2023 14:53:43 -0800
Subject: [PATCH 2/2] [MLIR][RISCV] Add VCIX legalization to `VectorToLLVM`
pass
---
mlir/include/mlir/Dialect/VCIX/CMakeLists.txt | 4 +
mlir/include/mlir/Dialect/VCIX/Transforms.h | 29 +
mlir/include/mlir/Dialect/VCIX/VCIX.td | 214 ++++
.../Conversion/VectorToLLVM/CMakeLists.txt | 2 +
.../VectorToLLVM/ConvertVectorToLLVMPass.cpp | 8 +
mlir/lib/Dialect/VCIX/CMakeLists.txt | 1 +
.../Dialect/VCIX/Transforms/CMakeLists.txt | 12 +
.../VCIX/Transforms/LegalizeForLLVMExport.cpp | 266 ++++
.../Dialect/VCIX/legalize-for-llvm-rv32.mlir | 1071 +++++++++++++++++
.../Dialect/VCIX/legalize-for-llvm-rv64.mlir | 1071 +++++++++++++++++
10 files changed, 2678 insertions(+)
create mode 100644 mlir/include/mlir/Dialect/VCIX/Transforms.h
create mode 100644 mlir/lib/Dialect/VCIX/Transforms/CMakeLists.txt
create mode 100644 mlir/lib/Dialect/VCIX/Transforms/LegalizeForLLVMExport.cpp
create mode 100644 mlir/test/Dialect/VCIX/legalize-for-llvm-rv32.mlir
create mode 100644 mlir/test/Dialect/VCIX/legalize-for-llvm-rv64.mlir
diff --git a/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt b/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
index 2ed490283b3519..49d4202bf9d8bb 100644
--- a/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
@@ -1,6 +1,10 @@
add_mlir_dialect(VCIX vcix)
add_mlir_doc(VCIXOps VCIXOps Dialects/ -gen-dialect-doc -dialect=vcix)
+set(LLVM_TARGET_DEFINITIONS VCIX.td)
+mlir_tablegen(VCIXConversions.inc -gen-llvmir-conversions)
+add_public_tablegen_target(MLIRVCIXConversionsIncGen)
+
set(LLVM_TARGET_DEFINITIONS VCIXAttrs.td)
mlir_tablegen(VCIXDialectEnums.h.inc -gen-enum-decls)
mlir_tablegen(VCIXDialectEnums.cpp.inc -gen-enum-defs)
diff --git a/mlir/include/mlir/Dialect/VCIX/Transforms.h b/mlir/include/mlir/Dialect/VCIX/Transforms.h
new file mode 100644
index 00000000000000..3287e0f535f988
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/Transforms.h
@@ -0,0 +1,29 @@
+//===- Transforms.h - VCIX Dialect Transformation Entrypoints ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VCIX_TRANSFORMS_H
+#define MLIR_DIALECT_VCIX_TRANSFORMS_H
+
+namespace mlir {
+
+class LLVMConversionTarget;
+class LLVMTypeConverter;
+class RewritePatternSet;
+
+/// Collect a set of patterns to lower VCIX ops to ops that map to LLVM
+/// intrinsics.
+void populateVCIXLegalizeForLLVMExportPatterns(LLVMTypeConverter &converter,
+ RewritePatternSet &patterns);
+
+/// Configure the target to support lowering VCIX ops to ops that map to LLVM
+/// intrinsics.
+void configureVCIXLegalizeForExportTarget(LLVMConversionTarget &target);
+
+} // namespace mlir
+
+#endif // MLIR_DIALECT_VCIX_TRANSFORMS_H
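
For reference, a minimal sketch of how these two entry points are intended to be used by a conversion pass (this mirrors the `ConvertVectorToLLVMPass` change later in this patch; the helper `lowerVCIX` below is illustrative only and not part of the changeset):

// Sketch only: `lowerVCIX` is a hypothetical helper, not part of the patch;
// the entry points and MLIR APIs used are the ones this patch relies on.
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Dialect/VCIX/Transforms.h"
#include "mlir/Transforms/DialectConversion.h"

static mlir::LogicalResult lowerVCIX(mlir::Operation *root) {
  mlir::MLIRContext *ctx = root->getContext();
  mlir::LLVMTypeConverter converter(ctx);
  mlir::RewritePatternSet patterns(ctx);
  mlir::LLVMConversionTarget target(*ctx);

  // Mark vcix.intrin.* ops (and the auxiliary LLVM ops) legal, vcix.* illegal.
  mlir::configureVCIXLegalizeForExportTarget(target);
  // Add the one-to-one promotion patterns declared in Transforms.h.
  mlir::populateVCIXLegalizeForLLVMExportPatterns(converter, patterns);

  return mlir::applyPartialConversion(root, target, std::move(patterns));
}
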
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIX.td b/mlir/include/mlir/Dialect/VCIX/VCIX.td
index 2b5bbeb22e97fc..655a563764632a 100644
--- a/mlir/include/mlir/Dialect/VCIX/VCIX.td
+++ b/mlir/include/mlir/Dialect/VCIX/VCIX.td
@@ -302,4 +302,218 @@ def VCIX_WideTernaryOp : VCIX_Op<"wide.ternary", []> {
let hasVerifier = 1;
}
+
+def VCIX_UnaryIntrinROOp : VCIX_IntrinOp<"unary.ro"> {
+ let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+ RVLIntrinType: $rvl,
+ OpcodeIntrinIntAttr: $opcode,
+ VCIX_SewLmulAttr: $sew_lmul,
+ RIntrinAttr: $rs2,
+ RIntrinAttr: $rd);
+
+ string llvmBuilder = [{
+ const unsigned xlenWidth = getXlenFromOpcode($opcode);
+ llvm::Type *xlen =
+ llvm::Type::getIntNTy(moduleTranslation.getLLVMContext(), xlenWidth);
+ llvm::Value *opcodeConst = getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+
+ llvm::Value *rs2Const = getLLVMConstant(
+ xlen, $rs2, $_location, moduleTranslation);
+
+ llvm::Value *rdConst = getLLVMConstant(
+ xlen, $rd, $_location, moduleTranslation);
+
+ auto intId = getUnaryROIntrinsicId($sew_lmul, op.getOp().getType());
+ createIntrinsicCall(builder, intId,
+ {opcodeConst, rs2Const, rdConst, $op, $rvl},
+ {xlen, xlen, xlen});
+ }];
+}
+
+def VCIX_UnaryIntrinOp : VCIX_IntrinOp<"unary"> {
+ let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+ Optional<RVLIntrinType>: $rvl,
+ OpcodeIntrinIntAttr: $opcode,
+ RIntrinAttr: $rs2);
+
+ let results = (outs VectorOfRank1: $result);
+
+ string llvmBuilder = [{
+ const unsigned xlenWidth = getXlenFromOpcode($opcode);
+ llvm::Type *xlen =
+ llvm::Type::getIntNTy(moduleTranslation.getLLVMContext(), xlenWidth);
+ llvm::Value *opcodeConst = getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+
+ llvm::Value *rs2Const = getLLVMConstant(
+ xlen, $rs2, $_location, moduleTranslation);
+
+ VectorType vt = op.getResult().getType().cast<VectorType>();
+ auto intId = getUnaryIntrinsicId(op.getOp().getType(), vt);
+ llvm::Value *rvl =
+ convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+ $result = createIntrinsicCall(
+ builder, intId, {opcodeConst, rs2Const, $op, rvl},
+ {$_resultType, xlen, xlen, xlen});
+ }];
+}
+
+def VCIX_BinaryIntrinROOp : VCIX_IntrinOp<"binary.ro"> {
+ let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+ VectorOfRank1: $op2,
+ Optional<RVLIntrinType>: $rvl,
+ OpcodeIntrinIntAttr: $opcode,
+ RIntrinAttr: $rd);
+
+ string llvmBuilder = [{
+ const unsigned xlenWidth = getXlenFromOpcode($opcode);
+ llvm::Type *xlen =
+ llvm::Type::getIntNTy(moduleTranslation.getLLVMContext(), xlenWidth);
+ llvm::Value *opcodeConst = getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+
+ llvm::Value *rdConst = getLLVMConstant(
+ xlen, $rd, $_location, moduleTranslation);
+
+ auto intId = getBinaryROIntrinsicId(op.getOp1().getType());
+ VectorType vt = op.getOp2().getType().cast<VectorType>();
+ llvm::Value *rvl =
+ convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+ createIntrinsicCall(builder, intId,
+ {opcodeConst, rdConst, $op2, $op1, rvl},
+ {xlen, $op2->getType(), $op1->getType(), xlen});
+ }];
+}
+
+def VCIX_BinaryIntrinOp : VCIX_IntrinOp<"binary"> {
+ let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+ VectorOfRank1: $op2,
+ Optional<RVLIntrinType>: $rvl,
+ OpcodeIntrinIntAttr: $opcode);
+
+ let results = (outs VectorOfRank1: $result);
+
+ string llvmBuilder = [{
+ const unsigned xlenWidth = getXlenFromOpcode($opcode);
+ llvm::Type *xlen = llvm::Type::getIntNTy(
+ moduleTranslation.getLLVMContext(), xlenWidth);
+ llvm::Value *opcodeConst = getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+
+ auto intId = getBinaryIntrinsicId(op.getOp1().getType());
+ VectorType vt = op.getResult().getType().cast<VectorType>();
+ llvm::Value *rvl =
+ convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+ $result = createIntrinsicCall(
+ builder, intId, {opcodeConst, $op2, $op1, rvl},
+ {$_resultType, xlen, $op2->getType(), $op1->getType(), xlen});
+ }];
+}
+
+def VCIX_TernaryIntrinROOp : VCIX_IntrinOp<"ternary.ro"> {
+ let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLIntrinType>: $rvl,
+ OpcodeIntrinIntAttr: $opcode);
+
+ string llvmBuilder = [{
+ const unsigned xlenWidth = getXlenFromOpcode($opcode);
+ llvm::Type *xlen = llvm::Type::getIntNTy(
+ moduleTranslation.getLLVMContext(), xlenWidth);
+ llvm::Value *opcodeConst = getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+
+ auto intId = getTernaryROIntrinsicId(op.getOp1().getType());
+ VectorType vt = op.getOp3().getType().cast<VectorType>();
+ llvm::Value *rvl =
+ convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+ createIntrinsicCall(
+ builder, intId, {opcodeConst, $op3, $op2, $op1, rvl},
+ {xlen, $op3->getType(), $op2->getType(), $op1->getType(), xlen});
+ }];
+}
+
+def VCIX_TernaryIntrinOp : VCIX_IntrinOp<"ternary"> {
+ let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLIntrinType>: $rvl,
+ OpcodeIntrinIntAttr: $opcode);
+ let results = (outs VectorOfRank1: $result);
+
+ string llvmBuilder = [{
+ const unsigned xlenWidth = getXlenFromOpcode($opcode);
+ llvm::Type *xlen = llvm::Type::getIntNTy(
+ moduleTranslation.getLLVMContext(), xlenWidth);
+ llvm::Value *opcodeConst = getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+
+ auto intId = getTernaryIntrinsicId(op.getOp1().getType());
+ VectorType vt = op.getResult().getType().cast<VectorType>();
+ llvm::Value *rvl =
+ convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+ $result = createIntrinsicCall(
+ builder, intId, {opcodeConst, $op3, $op2, $op1, rvl},
+ {$_resultType, xlen, $op3->getType(), $op2->getType(), $op1->getType(), xlen});
+ }];
+}
+
+def VCIX_WideTernaryIntrinROOp : VCIX_IntrinOp<"wide.ternary.ro"> {
+ let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLIntrinType>: $rvl,
+ OpcodeIntrinIntAttr: $opcode);
+
+ string llvmBuilder = [{
+ const unsigned xlenWidth = getXlenFromOpcode($opcode);
+ llvm::Type *xlen = llvm::Type::getIntNTy(
+ moduleTranslation.getLLVMContext(), xlenWidth);
+ llvm::Value *opcodeConst = getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+
+ auto intId = getWideTernaryROIntrinsicId(op.getOp1().getType());
+ VectorType vt = op.getOp3().getType().cast<VectorType>();
+ llvm::Value *rvl =
+ convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+ createIntrinsicCall(
+ builder, intId, {opcodeConst, $op3, $op2, $op1, rvl},
+ {xlen, $op3->getType(), $op2->getType(), $op1->getType(), xlen});
+ }];
+}
+
+def VCIX_WideTernaryIntrinOp : VCIX_IntrinOp<"wide.ternary", []> {
+ let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+ VectorOfRank1: $op2,
+ VectorOfRank1: $op3,
+ Optional<RVLIntrinType>: $rvl,
+ OpcodeIntrinIntAttr: $opcode);
+
+ let results = (outs VectorOfRank1: $result);
+
+ string llvmBuilder = [{
+ const unsigned xlenWidth = getXlenFromOpcode($opcode);
+ llvm::Type *xlen = llvm::Type::getIntNTy(
+ moduleTranslation.getLLVMContext(), xlenWidth);
+ llvm::Value *opcodeConst = getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+
+ auto intId = getWideTernaryIntrinsicId(op.getOp1().getType());
+ VectorType vt = op.getResult().getType().cast<VectorType>();
+ llvm::Value *rvl =
+ convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+ $result = createIntrinsicCall(
+ builder, intId, {opcodeConst, $op3, $op2, $op1, rvl},
+ {$_resultType, xlen, $op3->getType(), $op2->getType(), $op1->getType(), xlen});
+ }];
+}
#endif // VCIX
diff --git a/mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt b/mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt
index 5fbb50f62395ec..0d9e86b7eb39c9 100644
--- a/mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt
+++ b/mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt
@@ -19,6 +19,8 @@ add_mlir_conversion_library(MLIRVectorToLLVM
MLIRMemRefDialect
MLIRTargetLLVMIRExport
MLIRTransforms
+ MLIRVCIXDialect
+ MLIRVCIXTransforms
MLIRVectorDialect
MLIRVectorTransforms
)
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
index ff8e78a668e0f1..7001e28212025f 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
@@ -19,6 +19,8 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/VCIX/Transforms.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
#include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h"
#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/Dialect/X86Vector/Transforms.h"
@@ -53,6 +55,8 @@ struct LowerVectorToLLVMPass
registry.insert<amx::AMXDialect>();
if (x86Vector)
registry.insert<x86vector::X86VectorDialect>();
+ if (vcix)
+ registry.insert<vcix::VCIXDialect>();
}
void runOnOperation() override;
};
@@ -110,6 +114,10 @@ void LowerVectorToLLVMPass::runOnOperation() {
configureX86VectorLegalizeForExportTarget(target);
populateX86VectorLegalizeForLLVMExportPatterns(converter, patterns);
}
+ if (vcix) {
+ configureVCIXLegalizeForExportTarget(target);
+ populateVCIXLegalizeForLLVMExportPatterns(converter, patterns);
+ }
if (failed(
applyPartialConversion(getOperation(), target, std::move(patterns))))
diff --git a/mlir/lib/Dialect/VCIX/CMakeLists.txt b/mlir/lib/Dialect/VCIX/CMakeLists.txt
index f33061b2d87cff..9f57627c321fb0 100644
--- a/mlir/lib/Dialect/VCIX/CMakeLists.txt
+++ b/mlir/lib/Dialect/VCIX/CMakeLists.txt
@@ -1 +1,2 @@
add_subdirectory(IR)
+add_subdirectory(Transforms)
diff --git a/mlir/lib/Dialect/VCIX/Transforms/CMakeLists.txt b/mlir/lib/Dialect/VCIX/Transforms/CMakeLists.txt
new file mode 100644
index 00000000000000..5586f18195bfaf
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/Transforms/CMakeLists.txt
@@ -0,0 +1,12 @@
+add_mlir_dialect_library(MLIRVCIXTransforms
+ LegalizeForLLVMExport.cpp
+
+ DEPENDS
+ MLIRVCIXConversionsIncGen
+
+ LINK_LIBS PUBLIC
+ MLIRVCIXDialect
+ MLIRIR
+ MLIRLLVMCommonConversion
+ MLIRLLVMDialect
+ )
diff --git a/mlir/lib/Dialect/VCIX/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/VCIX/Transforms/LegalizeForLLVMExport.cpp
new file mode 100644
index 00000000000000..1c5c196a9a0dc0
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/Transforms/LegalizeForLLVMExport.cpp
@@ -0,0 +1,266 @@
+//===- LegalizeForLLVMExport.cpp - Prepare VCIX for LLVM translation ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
+#include "mlir/Conversion/LLVMCommon/Pattern.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/VCIX/Transforms.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/PatternMatch.h"
+#include <string>
+
+using namespace mlir;
+
+static constexpr char kVCIXTargetFeaturesAttr[] = "vcix.target_features";
+
+// Get the integer value from an attribute and reinterpret it as an unsigned
+// value of the attribute's bit width.
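+// For example, a value of 30 stored in an i5 attribute reads back from
+// getInt() as the sign-extended value -2; masking with (1 << 5) - 1 recovers
+// the unsigned value 30.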
+static unsigned getInteger(Attribute attr) {
+ auto intAttr = cast<IntegerAttr>(attr);
+ unsigned value = intAttr.getInt();
+ return value & ((1 << intAttr.getType().getIntOrFloatBitWidth()) - 1);
+}
+
+template <typename SourceOp, typename DestOp>
+struct OneToOneWithPromotionBase : public ConvertOpToLLVMPattern<SourceOp> {
+ using ConvertOpToLLVMPattern<SourceOp>::ConvertOpToLLVMPattern;
+
+ StringAttr getTargetFeatures(Operation *op) const {
+ Operation *func = op;
+ while (func) {
+ func = func->getParentOp();
+ if (isa<FunctionOpInterface>(func))
+ break;
+ }
+ if (!func)
+ llvm_unreachable("Cannot find function-like operation in parents");
+
+ const DictionaryAttr dictAttr = func->getAttrDictionary();
+ if (auto targetFeatures = dictAttr.getNamed(kVCIXTargetFeaturesAttr))
+ return targetFeatures->getValue().cast<StringAttr>();
+ return nullptr;
+ }
+
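+  /// Determine XLEN from the "vcix.target_features" attribute of the
+  /// enclosing function ("+32bit" selects 32, "+64bit" selects 64); defaults
+  /// to 64 when the attribute is absent.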
+ unsigned getXLen(Operation *op) const {
+ StringAttr targetFeatures = getTargetFeatures(op);
+ if (!targetFeatures)
+ return 64;
+
+ if (targetFeatures.getValue().contains("+32bit"))
+ return 32;
+
+ if (targetFeatures.getValue().contains("+64bit"))
+ return 64;
+
+ llvm_unreachable("Unsupported RISC-V target");
+ }
+
+ explicit OneToOneWithPromotionBase(LLVMTypeConverter &converter)
+ : mlir::ConvertOpToLLVMPattern<SourceOp>(converter) {}
+
+  /// Return a new IntegerAttr with the value promoted to XLEN.
+ IntegerAttr promoteIntAttr(ConversionPatternRewriter &rewriter,
+ Attribute attr, const unsigned xlen) const {
+ Type xlenType = rewriter.getIntegerType(xlen);
+ return rewriter.getIntegerAttr(xlenType, getInteger(attr));
+ }
+
+  /// Convert operands to the types required for legalization: a first operand
+  /// that is an integer narrower than XLEN is zero-extended.
+ FailureOr<SmallVector<Value>>
+ convertOperands(ConversionPatternRewriter &rewriter, ValueRange operands,
+ const unsigned xlen) const {
+ SmallVector<Value> res(operands);
+ Value op1 = operands.front();
+ if (auto intType = op1.getType().template dyn_cast<IntegerType>())
+ if (intType.getWidth() < xlen) {
+ Value zext = rewriter.create<LLVM::ZExtOp>(
+ op1.getLoc(), rewriter.getIntegerType(xlen), op1);
+ res[0] = zext;
+ }
+ return res;
+ }
+};
+
+/// Convert a VCIX operation into its intrinsic version, promoting opcode, rd
+/// and rs2 to XLEN.
+template <typename SourceOp, typename DestOp>
+struct OneToOneUnaryROWithPromotion
+ : public OneToOneWithPromotionBase<SourceOp, DestOp> {
+ using OneToOneWithPromotionBase<SourceOp, DestOp>::OneToOneWithPromotionBase;
+
+ explicit OneToOneUnaryROWithPromotion(LLVMTypeConverter &converter)
+ : OneToOneWithPromotionBase<SourceOp, DestOp>(converter) {}
+
+ LogicalResult
+ matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
+ const unsigned xlen = this->getXLen(op);
+ FailureOr<SmallVector<Value>> operands =
+ this->convertOperands(rewriter, adaptor.getOperands(), xlen);
+ if (failed(operands))
+ return failure();
+
+ Operation *newOp = rewriter.create(
+ op->getLoc(), rewriter.getStringAttr(DestOp::getOperationName()),
+ *operands, {}, op->getAttrs());
+ DestOp dstOp = dyn_cast<DestOp>(newOp);
+ Type xlenType = rewriter.getIntegerType(xlen);
+ dstOp.setOpcodeAttr(
+ rewriter.getIntegerAttr(xlenType, getInteger(dstOp.getOpcodeAttr())));
+ dstOp.setRs2Attr(
+ rewriter.getIntegerAttr(xlenType, getInteger(dstOp.getRs2Attr())));
+ dstOp.setRdAttr(
+ rewriter.getIntegerAttr(xlenType, getInteger(dstOp.getRdAttr())));
+ rewriter.eraseOp(op);
+ return success();
+ }
+};
+
+/// Convert a VCIX operation into its intrinsic version, promoting opcode and
+/// rs2 to XLEN.
+template <typename SourceOp, typename DestOp>
+struct OneToOneUnaryWithPromotion
+ : public OneToOneWithPromotionBase<SourceOp, DestOp> {
+ using OneToOneWithPromotionBase<SourceOp, DestOp>::OneToOneWithPromotionBase;
+
+ explicit OneToOneUnaryWithPromotion(LLVMTypeConverter &converter)
+ : OneToOneWithPromotionBase<SourceOp, DestOp>(converter) {}
+
+ LogicalResult
+ matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
+ const unsigned xlen = this->getXLen(op);
+ FailureOr<SmallVector<Value>> operands =
+ this->convertOperands(rewriter, adaptor.getOperands(), xlen);
+ if (failed(operands))
+ return failure();
+
+ Operation *newOp = rewriter.create(
+ op->getLoc(), rewriter.getStringAttr(DestOp::getOperationName()),
+ *operands, op->getResultTypes(), op->getAttrs());
+ DestOp dstOp = dyn_cast<DestOp>(newOp);
+ dstOp.setOpcodeAttr(
+ this->promoteIntAttr(rewriter, dstOp.getOpcodeAttr(), xlen));
+ dstOp.setRs2Attr(this->promoteIntAttr(rewriter, dstOp.getRs2Attr(), xlen));
+ rewriter.replaceOp(op, newOp);
+ return success();
+ }
+};
+
+/// Convert a VCIX operation into its intrinsic version, promoting opcode and
+/// rd to XLEN.
+template <typename SourceOp, typename DestOp>
+struct OneToOneBinaryROWithPromotion
+ : public OneToOneWithPromotionBase<SourceOp, DestOp> {
+ using OneToOneWithPromotionBase<SourceOp, DestOp>::OneToOneWithPromotionBase;
+
+ explicit OneToOneBinaryROWithPromotion(LLVMTypeConverter &converter)
+ : OneToOneWithPromotionBase<SourceOp, DestOp>(converter) {}
+
+ LogicalResult
+ matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
+ const unsigned xlen = this->getXLen(op);
+ FailureOr<SmallVector<Value>> operands =
+ this->convertOperands(rewriter, adaptor.getOperands(), xlen);
+ if (failed(operands))
+ return failure();
+
+ Operation *newOp = rewriter.create(
+ op->getLoc(), rewriter.getStringAttr(DestOp::getOperationName()),
+ *operands, {}, op->getAttrs());
+ DestOp dstOp = dyn_cast<DestOp>(newOp);
+ dstOp.setOpcodeAttr(
+ this->promoteIntAttr(rewriter, dstOp.getOpcodeAttr(), xlen));
+ dstOp.setRdAttr(this->promoteIntAttr(rewriter, dstOp.getRdAttr(), xlen));
+
+ rewriter.eraseOp(op);
+ return success();
+ }
+};
+
+/// Convert a VCIX operation into its intrinsic version, promoting opcode to
+/// XLEN.
+template <typename SourceOp, typename DestOp>
+struct OneToOneWithPromotion
+ : public OneToOneWithPromotionBase<SourceOp, DestOp> {
+ using OneToOneWithPromotionBase<SourceOp, DestOp>::OneToOneWithPromotionBase;
+
+ explicit OneToOneWithPromotion(LLVMTypeConverter &converter)
+ : OneToOneWithPromotionBase<SourceOp, DestOp>(converter) {}
+
+ LogicalResult
+ matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
+ const unsigned xlen = this->getXLen(op);
+ FailureOr<SmallVector<Value>> operands =
+ this->convertOperands(rewriter, adaptor.getOperands(), xlen);
+ if (failed(operands))
+ return failure();
+
+ Operation *newOp = rewriter.create(
+ op->getLoc(), rewriter.getStringAttr(DestOp::getOperationName()),
+ *operands, op->getResultTypes(), op->getAttrs());
+ DestOp dstOp = dyn_cast<DestOp>(newOp);
+ dstOp.setOpcodeAttr(
+ this->promoteIntAttr(rewriter, dstOp.getOpcodeAttr(), xlen));
+
+ if (op->getResultTypes().empty())
+ rewriter.eraseOp(op);
+ else
+ rewriter.replaceOp(op, newOp);
+
+ return success();
+ }
+};
+
+/// Populate the given list with patterns that convert from VCIX to LLVM.
+void mlir::populateVCIXLegalizeForLLVMExportPatterns(
+ LLVMTypeConverter &converter, RewritePatternSet &patterns) {
+ // Populate conversion patterns
+ patterns.add<
+ OneToOneUnaryWithPromotion<vcix::UnaryOp, vcix::UnaryIntrinOp>,
+ OneToOneUnaryROWithPromotion<vcix::UnaryROOp, vcix::UnaryIntrinROOp>,
+ OneToOneBinaryROWithPromotion<vcix::BinaryROOp, vcix::BinaryIntrinROOp>,
+ OneToOneWithPromotion<vcix::BinaryOp, vcix::BinaryIntrinOp>,
+ OneToOneWithPromotion<vcix::TernaryOp, vcix::TernaryIntrinOp>,
+ OneToOneWithPromotion<vcix::TernaryROOp, vcix::TernaryIntrinROOp>,
+ OneToOneWithPromotion<vcix::WideTernaryOp, vcix::WideTernaryIntrinOp>,
+ OneToOneWithPromotion<vcix::WideTernaryROOp,
+ vcix::WideTernaryIntrinROOp>>(converter);
+}
+
+void mlir::configureVCIXLegalizeForExportTarget(LLVMConversionTarget &target) {
+  // During legalization some operations may zero-extend their operands to
+  // simplify the later conversion to LLVM IR.
+ // clang-format off
+ target.addLegalOp<LLVM::ZExtOp,
+ LLVM::UndefOp,
+ LLVM::vector_extract,
+ LLVM::vector_insert,
+ LLVM::BitcastOp>();
+ target.addLegalOp<vcix::UnaryIntrinOp,
+ vcix::UnaryIntrinROOp,
+ vcix::BinaryIntrinOp,
+ vcix::BinaryIntrinROOp,
+ vcix::TernaryIntrinOp,
+ vcix::TernaryIntrinROOp,
+ vcix::WideTernaryIntrinOp,
+ vcix::WideTernaryIntrinROOp>();
+ target.addIllegalOp<vcix::UnaryOp,
+ vcix::UnaryROOp,
+ vcix::BinaryOp,
+ vcix::BinaryROOp,
+ vcix::TernaryOp,
+ vcix::TernaryROOp,
+ vcix::WideTernaryOp,
+ vcix::WideTernaryROOp>();
+ // clang-format on
+}
diff --git a/mlir/test/Dialect/VCIX/legalize-for-llvm-rv32.mlir b/mlir/test/Dialect/VCIX/legalize-for-llvm-rv32.mlir
new file mode 100644
index 00000000000000..175ed62c49dc40
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/legalize-for-llvm-rv32.mlir
@@ -0,0 +1,1071 @@
+// RUN: mlir-opt %s -convert-vector-to-llvm="enable-vcix" -convert-func-to-llvm -reconcile-unrealized-casts | FileCheck %s
+
+// -----
+// CHECK-LABEL: llvm.func @unary_ro_e8mf8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 0 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8mf8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8mf4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 1 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8mf4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 2 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8mf2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 3 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8m1(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 4 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8m2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 5 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8m4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 6 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8m8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e8m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_e8mf8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[1]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[1]xi8>
+// CHECK: }
+func.func @unary_e8mf8(%rvl: ui32) -> vector<[1] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i8>
+ return %0 : vector<[1] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8mf4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[2]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[2]xi8>
+// CHECK: }
+func.func @unary_e8mf4(%rvl: ui32) -> vector<[2] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i8>
+ return %0 : vector<[2] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[4]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xi8>
+// CHECK: }
+func.func @unary_e8mf2(%rvl: ui32) -> vector<[4] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i8>
+ return %0 : vector<[4] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[8]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[8]xi8>
+// CHECK: }
+func.func @unary_e8m1(%rvl: ui32) -> vector<[8] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i8>
+ return %0 : vector<[8] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[16]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[16]xi8>
+// CHECK: }
+func.func @unary_e8m2(%rvl: ui32) -> vector<[16] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i8>
+ return %0 : vector<[16] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[32]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[32]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[32]xi8>
+// CHECK: }
+func.func @unary_e8m4(%rvl: ui32) -> vector<[32] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i8>
+ return %0 : vector<[32] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[64]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[64]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[64]xi8>
+// CHECK: }
+func.func @unary_e8m8(%rvl: ui32) -> vector<[64] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[64] x i8>
+ return %0 : vector<[64] x i8>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_ro_e16mf4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 7 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16mf4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 8 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16mf2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 9 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16m1(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 10 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16m2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 11 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16m4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 12 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16m8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e16m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_e16mf4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[1]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[1]xi16>
+// CHECK: }
+func.func @unary_e16mf4(%rvl: ui32) -> vector<[1] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i16>
+ return %0 : vector<[1] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[2]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[2]xi16>
+// CHECK: }
+func.func @unary_e16mf2(%rvl: ui32) -> vector<[2] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i16>
+ return %0 : vector<[2] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[4]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xi16>
+// CHECK: }
+func.func @unary_e16m1(%rvl: ui32) -> vector<[4] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i16>
+ return %0 : vector<[4] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[8]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[8]xi16>
+// CHECK: }
+func.func @unary_e16m2(%rvl: ui32) -> vector<[8] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i16>
+ return %0 : vector<[8] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[16]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[16]xi16>
+// CHECK: }
+func.func @unary_e16m4(%rvl: ui32) -> vector<[16] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i16>
+ return %0 : vector<[16] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[32]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[32]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[32]xi16>
+// CHECK: }
+func.func @unary_e16m8(%rvl: ui32) -> vector<[32] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i16>
+ return %0 : vector<[32] x i16>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_ro_e32mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 13 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32mf2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e32m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 14 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32m1(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e32m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 15 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32m2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e32m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 16 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32m4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e32m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 17 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32m8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e32m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_e32mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[1]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[1]xi32>
+// CHECK: }
+func.func @unary_e32mf2(%rvl: ui32) -> vector<[1] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i32>
+ return %0 : vector<[1] x i32>
+}
+
+// CHECK-LABEL: llvm.func @unary_e32m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[2]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[2]xi32>
+// CHECK: }
+func.func @unary_e32m1(%rvl: ui32) -> vector<[2] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i32>
+ return %0 : vector<[2] x i32>
+}
+
+// CHECK-LABEL: llvm.func @unary_e32m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[4]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xi32>
+// CHECK: }
+func.func @unary_e32m2(%rvl: ui32) -> vector<[4] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i32>
+ return %0 : vector<[4] x i32>
+}
+
+// CHECK-LABEL: llvm.func @unary_e32m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[8]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[8]xi32>
+// CHECK: }
+func.func @unary_e32m4(%rvl: ui32) -> vector<[8] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i32>
+ return %0 : vector<[8] x i32>
+}
+
+// CHECK-LABEL: llvm.func @unary_e32m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[16]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[16]xi32>
+// CHECK: }
+func.func @unary_e32m8(%rvl: ui32) -> vector<[16] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i32>
+ return %0 : vector<[16] x i32>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_ro_e64m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 18 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e64m1(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e64m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 19 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e64m2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e64m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 20 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e64m4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e64m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 21 : i32}> : (i32, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e64m8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ vcix.unary.ro e64m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+ return
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_e64m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[1]xi64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[1]xi64>
+// CHECK: }
+func.func @unary_e64m1(%rvl: ui32) -> vector<[1] x i64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i64>
+ return %0 : vector<[1] x i64>
+}
+
+// CHECK-LABEL: llvm.func @unary_e64m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[2]xi64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[2]xi64>
+// CHECK: }
+func.func @unary_e64m2(%rvl: ui32) -> vector<[2] x i64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i64>
+ return %0 : vector<[2] x i64>
+}
+
+// CHECK-LABEL: llvm.func @unary_e64m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[4]xi64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xi64>
+// CHECK: }
+func.func @unary_e64m4(%rvl: ui32) -> vector<[4] x i64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i64>
+ return %0 : vector<[4] x i64>
+}
+
+// CHECK-LABEL: llvm.func @unary_e64m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i32) -> vector<[8]xi64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[8]xi64>
+// CHECK: }
+func.func @unary_e64m8(%rvl: ui32) -> vector<[8] x i64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i32
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i64>
+ return %0 : vector<[8] x i64>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @binary_vv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32, rd = 30 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @binary_vv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @binary_vv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK: }
+func.func @binary_vv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @binary_xv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<[4]xf32>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @binary_xv_ro(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<[4] x f32>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @binary_xv(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK: }
+func.func @binary_xv(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @binary_fv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32, rd = 30 : i32}> : (f32, vector<[4]xf32>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @binary_fv_ro(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @binary_fv(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK: }
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @binary_iv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<[4]xf32>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @binary_iv_ro(%op2 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.binary.ro %const, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @binary_iv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.binary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK: }
+func.func @binary_iv(%op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.binary %const, %op2, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @ternary_vvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @ternary_vvv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @ternary_vvv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK: }
+func.func @ternary_vvv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @ternary_xvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @ternary_xvv_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @ternary_xvv(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK: }
+func.func @ternary_xvv(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @ternary_fvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @ternary_fvv_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @ternary_fvv(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK: }
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @ternary_ivv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i32
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @ternary_ivv_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @ternary_ivv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i32
+// CHECK: %[[VAL_5:.*]] = "vcix.intrin.ternary"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_5]] : vector<[4]xf32>
+// CHECK: }
+func.func @ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @wide_ternary_vvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @wide_ternary_vvw_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_vvw(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) -> vector<[4]xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf64>
+// CHECK: }
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %0: vector<[4] x f64>
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_xvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @wide_ternary_xvw_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_xvw(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) -> vector<[4]xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf64>
+// CHECK: }
+func.func @wide_ternary_xvw(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %0 : vector<[4] x f64>
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_fvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @wide_ternary_fvw_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_fvw(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) -> vector<[4]xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xf64>
+// CHECK: }
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %op3 : vector<[4] x f64>
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_ivw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf64>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i32
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @wide_ternary_ivw_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.wide.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32)
+ return
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_ivw(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf64>, %[[VAL_2:.*]]: i32) -> vector<[4]xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i32
+// CHECK: %[[VAL_5:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+// CHECK: llvm.return %[[VAL_1]] : vector<[4]xf64>
+// CHECK: }
+func.func @wide_ternary_ivw(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.wide.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+ return %op3 : vector<[4] x f64>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @fixed_binary_vv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32, rd = 30 : i32}> : (vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_binary_vv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_vv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_binary_vv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_xv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i32,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_binary_xv_ro(%op1: i32, %op2 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_xv(
+// CHECK-SAME: %[[VAL_0:.*]]: i32,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_binary_xv(%op1: i32, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (i32, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_fv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 1 : i32, rd = 30 : i32}> : (f32, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_binary_fv_ro(%op1: f32, %op2 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2 { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_fv(
+// CHECK-SAME: %[[VAL_0:.*]]: f32,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_binary_fv(%op1: f32, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2 { opcode = 1 : i1 } : (f32, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_iv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_2:.*]] = llvm.zext %[[VAL_1]] : i5 to i32
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_2]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_binary_iv_ro(%op2 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.binary.ro %const, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_iv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_2:.*]] = llvm.zext %[[VAL_1]] : i5 to i32
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_2]], %[[VAL_0]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_binary_iv(%op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.binary %const, %op2 { opcode = 3 : i2 } : (i5, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @fixed_ternary_vvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_ternary_vvv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_vvv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_ternary_vvv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_xvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_ternary_xvv_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_xvv(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_ternary_xvv(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_fvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_ternary_fvv_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_fvv(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_ternary_fvv(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_ivv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_ternary_ivv_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_ivv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_vvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_wide_ternary_vvw_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_vvw(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf64>
+// CHECK: }
+func.func @fixed_wide_ternary_vvw(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %0 : vector<4 x f64>
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_xvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_wide_ternary_xvw_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_xvw(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf64>
+// CHECK: }
+func.func @fixed_wide_ternary_xvw(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %0 : vector<4 x f64>
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_fvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_wide_ternary_fvw_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_fvw(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK: llvm.return %[[VAL_2]] : vector<4xf64>
+// CHECK: }
+func.func @fixed_wide_ternary_fvw(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %op3 : vector<4 x f64>
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_ivw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf64>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_wide_ternary_ivw_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.wide.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_ivw(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK: llvm.return %[[VAL_1]] : vector<4xf64>
+// CHECK: }
+func.func @fixed_wide_ternary_ivw(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.wide.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %op3 : vector<4 x f64>
+}
diff --git a/mlir/test/Dialect/VCIX/legalize-for-llvm-rv64.mlir b/mlir/test/Dialect/VCIX/legalize-for-llvm-rv64.mlir
new file mode 100644
index 00000000000000..6fdf390363f2eb
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/legalize-for-llvm-rv64.mlir
@@ -0,0 +1,1071 @@
+// RUN: mlir-opt %s -convert-vector-to-llvm="enable-vcix" -convert-func-to-llvm -reconcile-unrealized-casts | FileCheck %s
+
+// -----
+// CHECK-LABEL: llvm.func @unary_ro_e8mf8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 0 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8mf8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8mf4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 1 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8mf4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e8mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 2 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8mf2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e8mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 3 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8m1(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e8m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 4 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8m2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e8m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 5 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8m4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e8m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e8m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 6 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e8m8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e8m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_e8mf8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[1]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[1]xi8>
+// CHECK: }
+func.func @unary_e8mf8(%rvl: ui64) -> vector<[1] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i8>
+ return %0 : vector<[1] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8mf4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[2]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[2]xi8>
+// CHECK: }
+func.func @unary_e8mf4(%rvl: ui64) -> vector<[2] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i8>
+ return %0 : vector<[2] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[4]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xi8>
+// CHECK: }
+func.func @unary_e8mf2(%rvl: ui64) -> vector<[4] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i8>
+ return %0 : vector<[4] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[8]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[8]xi8>
+// CHECK: }
+func.func @unary_e8m1(%rvl: ui64) -> vector<[8] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i8>
+ return %0 : vector<[8] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[16]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[16]xi8>
+// CHECK: }
+func.func @unary_e8m2(%rvl: ui64) -> vector<[16] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] x i8>
+ return %0 : vector<[16] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[32]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[32]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[32]xi8>
+// CHECK: }
+func.func @unary_e8m4(%rvl: ui64) -> vector<[32] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[32] x i8>
+ return %0 : vector<[32] x i8>
+}
+
+// CHECK-LABEL: llvm.func @unary_e8m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[64]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[64]xi8>
+// CHECK: llvm.return %[[VAL_2]] : vector<[64]xi8>
+// CHECK: }
+func.func @unary_e8m8(%rvl: ui64) -> vector<[64] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[64] x i8>
+ return %0 : vector<[64] x i8>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_ro_e16mf4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 7 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16mf4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e16mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 8 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16mf2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e16mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 9 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16m1(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e16m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 10 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16m2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e16m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 11 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16m4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e16m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e16m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 12 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e16m8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e16m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_e16mf4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[1]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[1]xi16>
+// CHECK: }
+func.func @unary_e16mf4(%rvl: ui64) -> vector<[1] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i16>
+ return %0 : vector<[1] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[2]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[2]xi16>
+// CHECK: }
+func.func @unary_e16mf2(%rvl: ui64) -> vector<[2] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i16>
+ return %0 : vector<[2] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[4]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xi16>
+// CHECK: }
+func.func @unary_e16m1(%rvl: ui64) -> vector<[4] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i16>
+ return %0 : vector<[4] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[8]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[8]xi16>
+// CHECK: }
+func.func @unary_e16m2(%rvl: ui64) -> vector<[8] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i16>
+ return %0 : vector<[8] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[16]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[16]xi16>
+// CHECK: }
+func.func @unary_e16m4(%rvl: ui64) -> vector<[16] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] x i16>
+ return %0 : vector<[16] x i16>
+}
+
+// CHECK-LABEL: llvm.func @unary_e16m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[32]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[32]xi16>
+// CHECK: llvm.return %[[VAL_2]] : vector<[32]xi16>
+// CHECK: }
+func.func @unary_e16m8(%rvl: ui64) -> vector<[32] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[32] x i16>
+ return %0 : vector<[32] x i16>
+}
+
+// -----
+// CHECK-LABEL: llvm.func @unary_ro_e32mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 13 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32mf2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e32mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e32m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 14 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32m1(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e32m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e32m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 15 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32m2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e32m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e32m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 16 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32m4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e32m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e32m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 17 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e32m8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e32m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// -----
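+// Value-producing vcix.unary lowering for e32; the scalable result type grows with LMUL, from vector<[1]xi32> at mf2 to vector<[16]xi32> at m8.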
+// CHECK-LABEL: llvm.func @unary_e32mf2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[1]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[1]xi32>
+// CHECK: }
+func.func @unary_e32mf2(%rvl: ui64) -> vector<[1] xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] xi32>
+ return %0 : vector<[1] xi32>
+}
+
+// CHECK-LABEL: llvm.func @unary_e32m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[2]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[2]xi32>
+// CHECK: }
+func.func @unary_e32m1(%rvl: ui64) -> vector<[2] xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] xi32>
+ return %0 : vector<[2] xi32>
+}
+
+// CHECK-LABEL: llvm.func @unary_e32m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[4]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xi32>
+// CHECK: }
+func.func @unary_e32m2(%rvl: ui64) -> vector<[4] xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] xi32>
+ return %0 : vector<[4] xi32>
+}
+
+// CHECK-LABEL: llvm.func @unary_e32m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[8]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[8]xi32>
+// CHECK: }
+func.func @unary_e32m4(%rvl: ui64) -> vector<[8] xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] xi32>
+ return %0 : vector<[8] xi32>
+}
+
+// CHECK-LABEL: llvm.func @unary_e32m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[16]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi32>
+// CHECK: llvm.return %[[VAL_2]] : vector<[16]xi32>
+// CHECK: }
+func.func @unary_e32m8(%rvl: ui64) -> vector<[16] xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] xi32>
+ return %0 : vector<[16] xi32>
+}
+
+// -----
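+// vcix.unary.ro lowering for e64 with LMUL m1 through m8 (sew_lmul = 18..21 on the generated intrinsic).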
+// CHECK-LABEL: llvm.func @unary_ro_e64m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 18 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e64m1(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e64m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e64m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 19 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e64m2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e64m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e64m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 20 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e64m4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e64m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @unary_ro_e64m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 21 : i32}> : (i64, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @unary_ro_e64m8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 0 : i64
+ vcix.unary.ro e64m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+ return
+}
+
+// -----
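+// Value-producing vcix.unary lowering for e64 with LMUL m1 through m8.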
+// CHECK-LABEL: llvm.func @unary_e64m1(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[1]xi64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[1]xi64>
+// CHECK: }
+func.func @unary_e64m1(%rvl: ui64) -> vector<[1] x i64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i64>
+  return %0 : vector<[1] x i64>
+}
+
+// CHECK-LABEL: llvm.func @unary_e64m2(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[2]xi64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[2]xi64>
+// CHECK: }
+func.func @unary_e64m2(%rvl: ui64) -> vector<[2] x i64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i64>
+  return %0 : vector<[2] x i64>
+}
+
+// CHECK-LABEL: llvm.func @unary_e64m4(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[4]xi64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xi64>
+// CHECK: }
+func.func @unary_e64m4(%rvl: ui64) -> vector<[4] x i64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i64>
+  return %0 : vector<[4] x i64>
+}
+
+// CHECK-LABEL: llvm.func @unary_e64m8(
+// CHECK-SAME: %[[VAL_0:.*]]: i64) -> vector<[8]xi64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[8]xi64>
+// CHECK: }
+func.func @unary_e64m8(%rvl: ui64) -> vector<[8] x i64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i64>
+  return %0 : vector<[8] x i64>
+}
+
+// -----
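+// vcix.binary and vcix.binary.ro lowering for scalable vectors with vector (vv), scalar (xv), floating-point (fv) and 5-bit immediate (iv) first operands; immediates are materialized as i5 constants and zero-extended to i64.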
+// CHECK-LABEL: llvm.func @binary_vv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64, rd = 30 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @binary_vv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (vector<[4] x f32>, vector<[4] x f32>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @binary_vv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK: }
+func.func @binary_vv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @binary_xv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<[4]xf32>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @binary_xv_ro(%op1: i64, %op2 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i64, vector<[4] x f32>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @binary_xv(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK: }
+func.func @binary_xv(%op1: i64, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @binary_fv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64, rd = 30 : i64}> : (f32, vector<[4]xf32>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @binary_fv_ro(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @binary_fv(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK: }
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @binary_iv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<[4]xf32>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @binary_iv_ro(%op2 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.binary.ro %const, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @binary_iv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.binary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK: }
+func.func @binary_iv(%op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.binary %const, %op2, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
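+// vcix.ternary and vcix.ternary.ro lowering for scalable vectors in the vvv, xvv, fvv and ivv operand forms.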
+// CHECK-LABEL: llvm.func @ternary_vvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @ternary_vvv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @ternary_vvv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK: }
+func.func @ternary_vvv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @ternary_xvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @ternary_xvv_ro(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f32>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @ternary_xvv(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK: }
+func.func @ternary_xvv(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @ternary_fvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @ternary_fvv_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @ternary_fvv(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK: }
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL: llvm.func @ternary_ivv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i64
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @ternary_ivv_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @ternary_ivv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i64
+// CHECK: %[[VAL_5:.*]] = "vcix.intrin.ternary"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK: llvm.return %[[VAL_5]] : vector<[4]xf32>
+// CHECK: }
+func.func @ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+ return %0 : vector<[4] x f32>
+}
+
+// -----
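+// vcix.wide.ternary and vcix.wide.ternary.ro lowering, where the third operand and the result use the widened element type (f64 for f32 inputs here).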
+// CHECK-LABEL: llvm.func @wide_ternary_vvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @wide_ternary_vvw_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_vvw(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) -> vector<[4]xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf64>
+// CHECK: }
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+ return %0: vector<[4] x f64>
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_xvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @wide_ternary_xvw_ro(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f64>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_xvw(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) -> vector<[4]xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+// CHECK: llvm.return %[[VAL_4]] : vector<[4]xf64>
+// CHECK: }
+func.func @wide_ternary_xvw(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+ return %0 : vector<[4] x f64>
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_fvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @wide_ternary_fvw_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_fvw(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) -> vector<[4]xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+// CHECK: llvm.return %[[VAL_2]] : vector<[4]xf64>
+// CHECK: }
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+ return %op3 : vector<[4] x f64>
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_ivw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf64>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i64
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @wide_ternary_ivw_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.wide.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui64)
+ return
+}
+
+// CHECK-LABEL: llvm.func @wide_ternary_ivw(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf64>, %[[VAL_2:.*]]: i64) -> vector<[4]xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i64
+// CHECK: %[[VAL_5:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+// CHECK: llvm.return %[[VAL_1]] : vector<[4]xf64>
+// CHECK: }
+func.func @wide_ternary_ivw(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.wide.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+ return %op3 : vector<[4] x f64>
+}
+
+// -----
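+// The same binary lowering checks for fixed-length vectors; no vector-length operand (%rvl) is passed in these cases.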
+// CHECK-LABEL: llvm.func @fixed_binary_vv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64, rd = 30 : i64}> : (vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_binary_vv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_vv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_binary_vv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_xv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i64,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_binary_xv_ro(%op1: i64, %op2 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i64, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_xv(
+// CHECK-SAME: %[[VAL_0:.*]]: i64,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_binary_xv(%op1: i64, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (i64, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_fv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 1 : i64, rd = 30 : i64}> : (f32, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_binary_fv_ro(%op1: f32, %op2 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.binary.ro %op1, %op2 { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_fv(
+// CHECK-SAME: %[[VAL_0:.*]]: f32,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_binary_fv(%op1: f32, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.binary %op1, %op2 { opcode = 1 : i1 } : (f32, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_iv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_2:.*]] = llvm.zext %[[VAL_1]] : i5 to i64
+// CHECK: "vcix.intrin.binary.ro"(%[[VAL_2]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_binary_iv_ro(%op2 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.binary.ro %const, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_binary_iv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_2:.*]] = llvm.zext %[[VAL_1]] : i5 to i64
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_2]], %[[VAL_0]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_binary_iv(%op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.binary %const, %op2 { opcode = 3 : i2 } : (i5, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
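+// Fixed-length vcix.ternary and vcix.ternary.ro lowering in the vvv, xvv, fvv and ivv forms.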
+// CHECK-LABEL: llvm.func @fixed_ternary_vvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_ternary_vvv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_vvv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_ternary_vvv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_xvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_ternary_xvv_ro(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_xvv(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_ternary_xvv(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_fvv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_ternary_fvv_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_fvv(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_ternary_fvv(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_ivv_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK: "vcix.intrin.ternary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_ternary_ivv_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_ternary_ivv(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK: llvm.return %[[VAL_4]] : vector<4xf32>
+// CHECK: }
+func.func @fixed_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+ return %0 : vector<4 x f32>
+}
+
+// -----
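+// Fixed-length vcix.wide.ternary and vcix.wide.ternary.ro lowering.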
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_vvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_wide_ternary_vvw_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_vvw(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf64>
+// CHECK: }
+func.func @fixed_wide_ternary_vvw(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %0 : vector<4 x f64>
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_xvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_wide_ternary_xvw_ro(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_xvw(
+// CHECK-SAME: %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK: llvm.return %[[VAL_3]] : vector<4xf64>
+// CHECK: }
+func.func @fixed_wide_ternary_xvw(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %0 : vector<4 x f64>
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_fvw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_wide_ternary_fvw_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_fvw(
+// CHECK-SAME: %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK: llvm.return %[[VAL_2]] : vector<4xf64>
+// CHECK: }
+func.func @fixed_wide_ternary_fvw(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %op3 : vector<4 x f64>
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_ivw_ro(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf64>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK: "vcix.intrin.wide.ternary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK: llvm.return
+// CHECK: }
+func.func @fixed_wide_ternary_ivw_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ vcix.wide.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>)
+ return
+}
+
+// CHECK-LABEL: llvm.func @fixed_wide_ternary_ivw(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK: %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK: %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK: llvm.return %[[VAL_1]] : vector<4xf64>
+// CHECK: }
+func.func @fixed_wide_ternary_ivw(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+ %const = arith.constant 1 : i5
+ %0 = vcix.wide.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+ return %op3 : vector<4 x f64>
+}