[Mlir-commits] [mlir] [MLIR][RISCV] Add VCIX dialect (PR #74664)

Kolya Panchenko llvmlistbot at llvm.org
Wed Dec 6 14:27:11 PST 2023


https://github.com/nikolaypanchenko created https://github.com/llvm/llvm-project/pull/74664

The changeset adds a new dialect to support VCIX intrinsics of the `XSfvcp` extension, allowing MLIR users to interact with co-processors that are compatible with that extension.

Source: https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software

>From 2257ac950f099d05861940f9744f8b848a35485a Mon Sep 17 00:00:00 2001
From: Kolya Panchenko <kolya.panchenko at sifive.com>
Date: Mon, 6 Nov 2023 09:40:17 -0800
Subject: [PATCH] [MLIR][RISCV] Add VCIX dialect

The changeset adds new dialect to support VCIX intrinsics of XSfvcp
extension to allow MLIR users to interact with co-processors that are
compatible with that extension.

Source: https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
---
 mlir/include/mlir/Conversion/Passes.td        |    5 +-
 mlir/include/mlir/Dialect/CMakeLists.txt      |    1 +
 mlir/include/mlir/Dialect/VCIX/CMakeLists.txt |   12 +
 mlir/include/mlir/Dialect/VCIX/Transforms.h   |   29 +
 mlir/include/mlir/Dialect/VCIX/VCIX.td        |  519 ++++++++
 mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h    |   20 +
 mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td   |   91 ++
 mlir/include/mlir/Dialect/VCIX/VCIXDialect.h  |   28 +
 mlir/include/mlir/InitAllDialects.h           |    2 +
 mlir/include/mlir/Target/LLVMIR/Dialect/All.h |    2 +
 .../Dialect/VCIX/VCIXToLLVMIRTranslation.h    |   31 +
 .../Conversion/VectorToLLVM/CMakeLists.txt    |    2 +
 .../VectorToLLVM/ConvertVectorToLLVMPass.cpp  |    8 +
 mlir/lib/Dialect/CMakeLists.txt               |    1 +
 mlir/lib/Dialect/VCIX/CMakeLists.txt          |    2 +
 mlir/lib/Dialect/VCIX/IR/CMakeLists.txt       |   16 +
 mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp        |   12 +
 mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp      |   28 +
 mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp          |  174 +++
 .../Dialect/VCIX/Transforms/CMakeLists.txt    |   12 +
 .../VCIX/Transforms/LegalizeForLLVMExport.cpp |  266 ++++
 mlir/lib/Target/LLVMIR/CMakeLists.txt         |    1 +
 mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt |    1 +
 .../Target/LLVMIR/Dialect/VCIX/CMakeLists.txt |   17 +
 .../Dialect/VCIX/VCIXToLLVMIRTranslation.cpp  |  215 ++++
 mlir/test/Dialect/VCIX/invalid.mlir           |   57 +
 .../Dialect/VCIX/legalize-for-llvm-rv32.mlir  | 1071 +++++++++++++++++
 .../Dialect/VCIX/legalize-for-llvm-rv64.mlir  | 1071 +++++++++++++++++
 mlir/test/Dialect/VCIX/ops-rv64.mlir          |  531 ++++++++
 mlir/test/Dialect/VCIX/ops.mlir               |  531 ++++++++
 mlir/test/Target/LLVMIR/vcix-rv32.mlir        |  897 ++++++++++++++
 mlir/test/Target/LLVMIR/vcix-rv64.mlir        |  897 ++++++++++++++
 32 files changed, 6549 insertions(+), 1 deletion(-)
 create mode 100644 mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
 create mode 100644 mlir/include/mlir/Dialect/VCIX/Transforms.h
 create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIX.td
 create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
 create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
 create mode 100644 mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
 create mode 100644 mlir/include/mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h
 create mode 100644 mlir/lib/Dialect/VCIX/CMakeLists.txt
 create mode 100644 mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
 create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
 create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
 create mode 100644 mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
 create mode 100644 mlir/lib/Dialect/VCIX/Transforms/CMakeLists.txt
 create mode 100644 mlir/lib/Dialect/VCIX/Transforms/LegalizeForLLVMExport.cpp
 create mode 100644 mlir/lib/Target/LLVMIR/Dialect/VCIX/CMakeLists.txt
 create mode 100644 mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp
 create mode 100644 mlir/test/Dialect/VCIX/invalid.mlir
 create mode 100644 mlir/test/Dialect/VCIX/legalize-for-llvm-rv32.mlir
 create mode 100644 mlir/test/Dialect/VCIX/legalize-for-llvm-rv64.mlir
 create mode 100644 mlir/test/Dialect/VCIX/ops-rv64.mlir
 create mode 100644 mlir/test/Dialect/VCIX/ops.mlir
 create mode 100644 mlir/test/Target/LLVMIR/vcix-rv32.mlir
 create mode 100644 mlir/test/Target/LLVMIR/vcix-rv64.mlir

diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 06756ff3df0bb..c03b0137e6e0f 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -1294,6 +1294,10 @@ def ConvertVectorToLLVMPass : Pass<"convert-vector-to-llvm"> {
            "bool", /*default=*/"false",
            "Enables the use of ArmSVE dialect while lowering the vector "
        "dialect.">,
+    Option<"vcix", "enable-vcix",
+           "bool", /*default=*/"false",
+           "Enables the use of VCIX dialect while lowering the vector "
+           "dialect to RISC-V target">,
     Option<"x86Vector", "enable-x86vector",
            "bool", /*default=*/"false",
            "Enables the use of X86Vector dialect while lowering the vector "
@@ -1310,5 +1314,4 @@ def ConvertVectorToSPIRV : Pass<"convert-vector-to-spirv"> {
   let constructor = "mlir::createConvertVectorToSPIRVPass()";
   let dependentDialects = ["spirv::SPIRVDialect"];
 }
-
 #endif // MLIR_CONVERSION_PASSES
diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt
index 1c4569ecfa584..1408ced218dbb 100644
--- a/mlir/include/mlir/Dialect/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/CMakeLists.txt
@@ -37,5 +37,6 @@ add_subdirectory(Tosa)
 add_subdirectory(Transform)
 add_subdirectory(UB)
 add_subdirectory(Utils)
+add_subdirectory(VCIX)
 add_subdirectory(Vector)
 add_subdirectory(X86Vector)
diff --git a/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt b/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
new file mode 100644
index 0000000000000..49d4202bf9d8b
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/CMakeLists.txt
@@ -0,0 +1,12 @@
+add_mlir_dialect(VCIX vcix)
+add_mlir_doc(VCIXOps VCIXOps Dialects/ -gen-dialect-doc -dialect=vcix)
+
+set(LLVM_TARGET_DEFINITIONS VCIX.td)
+mlir_tablegen(VCIXConversions.inc -gen-llvmir-conversions)
+add_public_tablegen_target(MLIRVCIXConversionsIncGen)
+
+set(LLVM_TARGET_DEFINITIONS VCIXAttrs.td)
+mlir_tablegen(VCIXDialectEnums.h.inc -gen-enum-decls)
+mlir_tablegen(VCIXDialectEnums.cpp.inc -gen-enum-defs)
+add_public_tablegen_target(MLIRVCIXDialectEnumIncGen)
+add_dependencies(mlir-headers MLIRVCIXDialectEnumIncGen)
diff --git a/mlir/include/mlir/Dialect/VCIX/Transforms.h b/mlir/include/mlir/Dialect/VCIX/Transforms.h
new file mode 100644
index 0000000000000..3287e0f535f98
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/Transforms.h
@@ -0,0 +1,29 @@
+//===- Transforms.h - VCIX Dialect Transformation Entrypoints ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VCIX_TRANSFORMS_H
+#define MLIR_DIALECT_VCIX_TRANSFORMS_H
+
+namespace mlir {
+
+class LLVMConversionTarget;
+class LLVMTypeConverter;
+class RewritePatternSet;
+
+/// Collect a set of patterns to lower VCIX ops to ops that map to LLVM
+/// intrinsics.
+void populateVCIXLegalizeForLLVMExportPatterns(LLVMTypeConverter &converter,
+                                               RewritePatternSet &patterns);
+
+/// Configure the target to support lowering VCIX ops to ops that map to LLVM
+/// intrinsics.
+void configureVCIXLegalizeForExportTarget(LLVMConversionTarget &target);
+
+} // namespace mlir
+
+#endif // MLIR_DIALECT_VCIX_TRANSFORMS_H
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIX.td b/mlir/include/mlir/Dialect/VCIX/VCIX.td
new file mode 100644
index 0000000000000..655a563764632
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIX.td
@@ -0,0 +1,519 @@
+//===-- VCIX.td - VCIX dialect operation definitions *- tablegen -*--------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// The file defines the basic operations for the VCIX dialect.
+//
+// The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+// to extend application processors with custom coprocessors and
+// variable-latency arithmetic units. The interface offers throughput comparable
+// to that of standard RISC-V vector instructions. To accelerate performance,
+// system designers may use VCIX as a low-latency, high-throughput interface to
+// a coprocessor
+//
+// https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+//
+//===----------------------------------------------------------------------===//
+#ifndef VCIX
+#define VCIX
+
+include "mlir/IR/EnumAttr.td"
+include "mlir/IR/OpBase.td"
+include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+
+include "mlir/Dialect/VCIX/VCIXAttrs.td"
+
+//===----------------------------------------------------------------------===//
+// VCIX dialect definition.
+//===----------------------------------------------------------------------===//
+
+def VCIX_Dialect : Dialect {
+  let name = "vcix";
+  let cppNamespace = "::mlir::vcix";
+  let description = [{
+     The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+     to extend application processors with custom coprocessors and
+     variable-latency arithmetic units. The interface offers throughput comparable
+     to that of standard RISC-V vector instructions. To accelerate performance,
+     system designers may use VCIX as a low-latency, high-throughput interface to
+     a coprocessor
+
+     https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+  }];
+
+  let usePropertiesForAttributes = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// VCIX Ops
+//===----------------------------------------------------------------------===//
+class VCIX_Op<string mnemonic, list<Trait> traits = []>
+    : Op<VCIX_Dialect, mnemonic, traits> {}
+
+class VCIX_IntrinOp<string mnemonic, list<Trait> traits = []>
+    : LLVM_OpBase<VCIX_Dialect, "intrin." #mnemonic, traits> {}
+
+//===----------------------------------------------------------------------===//
+// Unary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_UnaryROOp : VCIX_Op<"unary.ro", []> {
+  let summary = "Unary VCIX operation with side effects and without result";
+  let description = [{
+    Unary VCIX operation that has some side effects and does not produce a result.
+
+    Corresponds to
+    ```
+    Mnemonic    funct6 vm  rs2   rs1  funct3  rd     Destination   Sources
+    sf.vc.x     0000-- 1  -----  xs1   100   -----   none          scalar xs1
+    sf.vc.i     0000-- 1  ----- simm   011   -----   none          simm[4:0]
+    ```
+  }];
+
+  let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+                       RVLType: $rvl,
+                       OpcodeIntAttr: $opcode,
+                       VCIX_SewLmulAttr: $sew_lmul,
+                       RAttr: $rs2,
+                       RAttr: $rd);
+
+  let assemblyFormat = [{
+    $sew_lmul $op `,` $rvl  attr-dict `:` `(` type($op) `,` type($rvl) `)`
+  }];
+
+  let hasVerifier = 1;
+}
+
+def VCIX_UnaryOp : VCIX_Op<"unary", []> {
+  let summary = "unary VCIX operation";
+  let description = [{
+    Unary VCIX operation that produces a result.
+
+    Corresponds to
+    ```
+    Mnemonic    funct6 vm  rs2   rs1  funct3  rd     Destination   Sources
+    sf.vc.v.x   0000-- 0  -----  xs1   100    vd     vector vd     scalar xs1
+    sf.vc.v.i   0000-- 0  ----- simm   011    vd     vector vd     simm[4:0]
+    ```
+  }];
+
+  let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+                       Optional<RVLType>: $rvl,
+                       OpcodeIntAttr: $opcode,
+                       RAttr: $rs2);
+
+  let results = (outs VectorOfRank1: $result);
+
+  let assemblyFormat = [{
+    $op (`,` $rvl^)?  attr-dict
+      `:` `(` type($op) (`,` type($rvl)^)? `)` `->` type($result)
+  }];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Binary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_BinaryROOp : VCIX_Op<"binary.ro", []> {
+  let summary = "Read-only binary VCIX operation";
+  let description = [{
+    Read-only binary VCIX operation that does not produce a result.
+
+    Corresponds to
+    ```
+    Mnemonic    funct6 vm  rs2   rs1  funct3  rd     Destination   Sources
+    sf.vc.v.vv  0010-- 1   vs2   vs1   000    -----     none       vector vs1, vector vs2
+    sf.vc.v.xv  0010-- 1   vs2   xs1   100    -----     none       scalar xs1, vector vs2
+    sf.vc.v.iv  0010-- 1   vs2  simm   011    -----     none       simm[4:0],  vector vs2
+    sf.vc.v.fv  0010-- 1   vs2   fs1   101    -----     none       scalar fs1, vector vs2
+    ```
+  }];
+
+  let arguments = (ins VectorOfRank1OrScalar: $op1,
+                       VectorOfRank1: $op2,
+                       Optional<RVLType>: $rvl,
+                       OpcodeIntAttr: $opcode,
+                       RAttr: $rd);
+
+  let assemblyFormat = [{
+    $op1 `,` $op2 (`,` $rvl^)? attr-dict `:`
+      `(` type($op1) `,` type($op2) (`,` type($rvl)^)? `)`
+  }];
+
+  let hasVerifier = 1;
+}
+
+def VCIX_BinaryOp : VCIX_Op<"binary", []> {
+  let summary = "binary VCIX operation";
+  let description = [{
+    Binary VCIX operation that produces a result.
+
+    Corresponds to
+    ```
+    Mnemonic    funct6 vm  rs2   rs1  funct3  rd     Destination   Sources
+    sf.vc.v.vv  0010-- 0   vs2   vs1   000    vd     vector vd     vector vs1, vector vs2
+    sf.vc.v.xv  0010-- 0   vs2   xs1   100    vd     vector vd     scalar xs1, vector vs2
+    sf.vc.v.iv  0010-- 0   vs2  simm   011    vd     vector vd     simm[4:0],  vector vs2
+    sf.vc.v.fv  0010-- 0   vs2   fs1   101    vd     vector vd     scalar fs1, vector vs2
+    ```
+  }];
+
+  let arguments = (ins VectorOfRank1OrScalar: $op1,
+                       VectorOfRank1: $op2,
+                       Optional<RVLType>: $rvl,
+                       OpcodeIntAttr: $opcode);
+
+  let results = (outs VectorOfRank1: $result);
+
+  let assemblyFormat = [{
+    $op1 `,` $op2 (`,` $rvl^)? attr-dict `:`
+      `(` type($op1) `,` type($op2) (`,` type($rvl)^)? `)` `->` type($result)
+  }];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Ternary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_TernaryROOp : VCIX_Op<"ternary.ro", []> {
+  let summary = "Read-only ternary VCIX operation";
+  let description = [{
+    Ternary VCIX operation that does not generate a result.
+
+    Corresponds to
+    ```
+    Mnemonic    funct6 vm  rs2   rs1  funct3  rd     Destination   Sources
+    sf.vc.vvv   1010-- 1   vs2   vs1  000     vd        none       vector vs1, vector vs2, vector vd
+    sf.vc.xvv   1010-- 1   vs2   xs1  100     vd        none       scalar xs1, vector vs2, vector vd
+    sf.vc.ivv   1010-- 1   vs2   simm 011     vd        none       simm[4:0], vector vs2, vector vd
+    sf.vc.fvv   10101- 1   vs2   fs1  101     vd        none       scalar fs1, vector vs2, vector vd
+    ```
+  }];
+
+  let arguments = (ins VectorOfRank1OrScalar: $op1,
+                       VectorOfRank1: $op2,
+                       VectorOfRank1: $op3,
+                       Optional<RVLType>: $rvl,
+                       OpcodeIntAttr: $opcode);
+
+  let assemblyFormat = [{
+    $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+      `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)`
+  }];
+
+  let hasVerifier = 1;
+}
+
+def VCIX_TernaryOp : VCIX_Op<"ternary", []> {
+  let summary = "Ternary VCIX operation";
+  let description = [{
+    Ternary VCIX operation that produces a result.
+
+    Corresponds to
+    ```
+    Mnemonic    funct6 vm  rs2   rs1  funct3  rd     Destination   Sources
+    sf.vc.v.vvv 1010-- 0   vs2   vs1  000     vd     vector vd     vector vs1, vector vs2, vector vd
+    sf.vc.v.xvv 1010-- 0   vs2   xs1  100     vd     vector vd     scalar xs1, vector vs2, vector vd
+    sf.vc.v.ivv 1010-- 0   vs2   simm 011     vd     vector vd     simm[4:0], vector vs2, vector vd
+    sf.vc.v.fvv 10101- 0   vs2   fs1  101     vd     vector vd     scalar fs1, vector vs2, vector vd
+    ```
+  }];
+
+  let arguments = (ins VectorOfRank1OrScalar: $op1,
+                       VectorOfRank1: $op2,
+                       VectorOfRank1: $op3,
+                       Optional<RVLType>: $rvl,
+                       OpcodeIntAttr: $opcode);
+
+  let results = (outs VectorOfRank1: $result);
+
+  let assemblyFormat = [{
+    $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+      `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)` `->` type($result)
+  }];
+
+  let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Wide ternary VCIX operations
+//===----------------------------------------------------------------------===//
+def VCIX_WideTernaryROOp : VCIX_Op<"wide.ternary.ro", []> {
+  let summary = "Read-only wide ternary VCIX operation";
+  let description = [{
+    Wide ternary VCIX operation that does not produce a result.
+
+    Corresponds to
+    ```
+    Mnemonic    funct6 vm  rs2   rs1  funct3  rd     Destination   Sources
+    sf.vc.vvw   1111-- 1   vs2   vs1  000     vd      none         vector vs1, vector vs2, wide vd
+    sf.vc.xvw   1111-- 1   vs2   xs1  100     vd      none         scalar xs1, vector vs2, wide vd
+    sf.vc.ivw   1111-- 1   vs2   simm 011     vd      none         simm[4:0], vector vs2, wide vd
+    sf.vc.fvw   11111- 1   vs2   fs1  101     vd      none         scalar fs1, vector vs2, wide vd
+    ```
+  }];
+
+  let arguments = (ins VectorOfRank1OrScalar: $op1,
+                       VectorOfRank1: $op2,
+                       VectorOfRank1: $op3,
+                       Optional<RVLType>: $rvl,
+                       OpcodeIntAttr: $opcode);
+
+  let assemblyFormat = [{
+    $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+      `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)`
+  }];
+
+  let hasVerifier = 1;
+}
+
+def VCIX_WideTernaryOp : VCIX_Op<"wide.ternary", []> {
+  let summary = "Wide ternary VCIX operation";
+  let description = [{
+    Wide ternary VCIX operation that produces a result.
+
+    Corresponds to
+    ```
+    Mnemonic    funct6 vm  rs2   rs1  funct3  rd     Destination   Sources
+    sf.vc.v.vvw 1111-- 0   vs2   vs1  000     vd     wide vd       vector vs1, vector vs2, wide vd
+    sf.vc.v.xvw 1111-- 0   vs2   xs1  100     vd     wide vd       scalar xs1, vector vs2, wide vd
+    sf.vc.v.ivw 1111-- 0   vs2   simm 011     vd     wide vd       simm[4:0], vector vs2, wide vd
+    sf.vc.v.fvw 11111- 0   vs2   fs1  101     vd     wide vd       scalar fs1, vector vs2, wide vd
+    ```
+  }];
+
+  let arguments = (ins VectorOfRank1OrScalar: $op1,
+                       VectorOfRank1: $op2,
+                       VectorOfRank1: $op3,
+                       Optional<RVLType>: $rvl,
+                       OpcodeIntAttr: $opcode);
+
+  let results = (outs VectorOfRank1: $result);
+
+  let assemblyFormat = [{
+    $op1 `,` $op2 `,` $op3 (`,` $rvl^)? attr-dict `:`
+      `(` type($op1) `,` type($op2) `,` type($op3) (`,` type($rvl)^)? `)` `->` type($result)
+  }];
+
+  let hasVerifier = 1;
+}
+
+def VCIX_UnaryIntrinROOp : VCIX_IntrinOp<"unary.ro"> {
+  let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+                       RVLIntrinType: $rvl,
+                       OpcodeIntrinIntAttr: $opcode,
+                       VCIX_SewLmulAttr: $sew_lmul,
+                       RIntrinAttr: $rs2,
+                       RIntrinAttr: $rd);
+
+  string llvmBuilder = [{
+      const unsigned xlenWidth = getXlenFromOpcode($opcode);
+      llvm::Type *xlen =
+          llvm::Type::getIntNTy(moduleTranslation.getLLVMContext(), xlenWidth);
+      llvm::Value *opcodeConst = getLLVMConstant(
+          xlen, $opcode, $_location, moduleTranslation);
+
+      llvm::Value *rs2Const = getLLVMConstant(
+          xlen, $rs2, $_location, moduleTranslation);
+
+      llvm::Value *rdConst = getLLVMConstant(
+          xlen, $rd, $_location, moduleTranslation);
+
+      auto intId = getUnaryROIntrinsicId($sew_lmul, op.getOp().getType());
+      createIntrinsicCall(builder, intId,
+                          {opcodeConst, rs2Const, rdConst, $op, $rvl},
+                          {xlen, xlen, xlen});
+  }];
+}
+
+def VCIX_UnaryIntrinOp : VCIX_IntrinOp<"unary"> {
+  let arguments = (ins AnyTypeOf<[I<64>, I<32>, I<5>]>: $op,
+                       Optional<RVLIntrinType>: $rvl,
+                       OpcodeIntrinIntAttr: $opcode,
+                       RIntrinAttr: $rs2);
+
+  let results = (outs VectorOfRank1: $result);
+
+  string llvmBuilder = [{
+      const unsigned xlenWidth = getXlenFromOpcode($opcode);
+      llvm::Type *xlen =
+          llvm::Type::getIntNTy(moduleTranslation.getLLVMContext(), xlenWidth);
+      llvm::Value *opcodeConst = getLLVMConstant(
+          xlen, $opcode, $_location, moduleTranslation);
+
+      llvm::Value *rs2Const = getLLVMConstant(
+          xlen, $rs2, $_location, moduleTranslation);
+
+      VectorType vt = op.getResult().getType().cast<VectorType>();
+      auto intId = getUnaryIntrinsicId(op.getOp().getType(), vt);
+      llvm::Value *rvl =
+          convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+      $result = createIntrinsicCall(
+          builder, intId, {opcodeConst, rs2Const, $op, rvl},
+          {$_resultType, xlen, xlen, xlen});
+  }];
+}
+
+def VCIX_BinaryIntrinROOp : VCIX_IntrinOp<"binary.ro"> {
+  let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+                       VectorOfRank1: $op2,
+                       Optional<RVLIntrinType>: $rvl,
+                       OpcodeIntrinIntAttr: $opcode,
+                       RIntrinAttr: $rd);
+
+  string llvmBuilder = [{
+      const unsigned xlenWidth = getXlenFromOpcode($opcode);
+      llvm::Type *xlen =
+          llvm::Type::getIntNTy(moduleTranslation.getLLVMContext(), xlenWidth);
+      llvm::Value *opcodeConst = getLLVMConstant(
+          xlen, $opcode, $_location, moduleTranslation);
+
+      llvm::Value *rdConst = getLLVMConstant(
+          xlen, $rd, $_location, moduleTranslation);
+
+      auto intId = getBinaryROIntrinsicId(op.getOp1().getType());
+      VectorType vt = op.getOp2().getType().cast<VectorType>();
+      llvm::Value *rvl =
+          convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+      createIntrinsicCall(builder, intId,
+                          {opcodeConst, rdConst, $op2, $op1, rvl},
+                          {xlen, $op2->getType(), $op1->getType(), xlen});
+  }];
+}
+
+def VCIX_BinaryIntrinOp : VCIX_IntrinOp<"binary"> {
+  let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+                       VectorOfRank1: $op2,
+                       Optional<RVLIntrinType>: $rvl,
+                       OpcodeIntrinIntAttr: $opcode);
+
+  let results = (outs VectorOfRank1: $result);
+
+  string llvmBuilder = [{
+      const unsigned xlenWidth = getXlenFromOpcode($opcode);
+      llvm::Type *xlen = llvm::Type::getIntNTy(
+          moduleTranslation.getLLVMContext(), xlenWidth);
+      llvm::Value *opcodeConst = getLLVMConstant(
+          xlen, $opcode, $_location, moduleTranslation);
+
+      auto intId = getBinaryIntrinsicId(op.getOp1().getType());
+      VectorType vt = op.getResult().getType().cast<VectorType>();
+      llvm::Value *rvl =
+          convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+      $result = createIntrinsicCall(
+          builder, intId, {opcodeConst, $op2, $op1, rvl},
+          {$_resultType, xlen, $op2->getType(), $op1->getType(), xlen});
+  }];
+}
+
+def VCIX_TernaryIntrinROOp : VCIX_IntrinOp<"ternary.ro"> {
+  let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+                       VectorOfRank1: $op2,
+                       VectorOfRank1: $op3,
+                       Optional<RVLIntrinType>: $rvl,
+                       OpcodeIntrinIntAttr: $opcode);
+
+  string llvmBuilder = [{
+      const unsigned xlenWidth = getXlenFromOpcode($opcode);
+      llvm::Type *xlen = llvm::Type::getIntNTy(
+          moduleTranslation.getLLVMContext(), xlenWidth);
+      llvm::Value *opcodeConst = getLLVMConstant(
+          xlen, $opcode, $_location, moduleTranslation);
+
+      auto intId = getTernaryROIntrinsicId(op.getOp1().getType());
+      VectorType vt = op.getOp3().getType().cast<VectorType>();
+      llvm::Value *rvl =
+          convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+      createIntrinsicCall(
+          builder, intId, {opcodeConst, $op3, $op2, $op1, rvl},
+          {xlen, $op3->getType(), $op2->getType(), $op1->getType(), xlen});
+  }];
+}
+
+def VCIX_TernaryIntrinOp : VCIX_IntrinOp<"ternary"> {
+  let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+                       VectorOfRank1: $op2,
+                       VectorOfRank1: $op3,
+                       Optional<RVLIntrinType>: $rvl,
+                       OpcodeIntrinIntAttr: $opcode);
+  let results = (outs VectorOfRank1: $result);
+
+  string llvmBuilder = [{
+      const unsigned xlenWidth = getXlenFromOpcode($opcode);
+      llvm::Type *xlen = llvm::Type::getIntNTy(
+          moduleTranslation.getLLVMContext(), xlenWidth);
+      llvm::Value *opcodeConst = getLLVMConstant(
+          xlen, $opcode, $_location, moduleTranslation);
+
+      auto intId = getTernaryIntrinsicId(op.getOp1().getType());
+      VectorType vt = op.getResult().getType().cast<VectorType>();
+      llvm::Value *rvl =
+          convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+      $result = createIntrinsicCall(
+          builder, intId, {opcodeConst, $op3, $op2, $op1, rvl},
+          {$_resultType, xlen, $op3->getType(), $op2->getType(), $op1->getType(), xlen});
+  }];
+}
+
+def VCIX_WideTernaryIntrinROOp : VCIX_IntrinOp<"wide.ternary.ro"> {
+  let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+                       VectorOfRank1: $op2,
+                       VectorOfRank1: $op3,
+                       Optional<RVLIntrinType>: $rvl,
+                       OpcodeIntrinIntAttr: $opcode);
+
+  string llvmBuilder = [{
+      const unsigned xlenWidth = getXlenFromOpcode($opcode);
+      llvm::Type *xlen = llvm::Type::getIntNTy(
+          moduleTranslation.getLLVMContext(), xlenWidth);
+      llvm::Value *opcodeConst = getLLVMConstant(
+          xlen, $opcode, $_location, moduleTranslation);
+
+      auto intId = getWideTernaryROIntrinsicId(op.getOp1().getType());
+      VectorType vt = op.getOp3().getType().cast<VectorType>();
+      llvm::Value *rvl =
+          convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+      createIntrinsicCall(
+          builder, intId, {opcodeConst, $op3, $op2, $op1, rvl},
+          {xlen, $op3->getType(), $op2->getType(), $op1->getType(), xlen});
+  }];
+}
+
+def VCIX_WideTernaryIntrinOp : VCIX_IntrinOp<"wide.ternary", []> {
+  let arguments = (ins VectorOfRank1OrScalarIntrin: $op1,
+                       VectorOfRank1: $op2,
+                       VectorOfRank1: $op3,
+                       Optional<RVLIntrinType>: $rvl,
+                       OpcodeIntrinIntAttr: $opcode);
+
+  let results = (outs VectorOfRank1: $result);
+
+  string llvmBuilder = [{
+      const unsigned xlenWidth = getXlenFromOpcode($opcode);
+      llvm::Type *xlen = llvm::Type::getIntNTy(
+          moduleTranslation.getLLVMContext(), xlenWidth);
+      llvm::Value *opcodeConst = getLLVMConstant(
+          xlen, $opcode, $_location, moduleTranslation);
+
+      auto intId = getWideTernaryIntrinsicId(op.getOp1().getType());
+      VectorType vt = op.getResult().getType().cast<VectorType>();
+      llvm::Value *rvl =
+          convertRvl(builder, $rvl, vt, xlen, $_location, moduleTranslation);
+
+      $result = createIntrinsicCall(
+          builder, intId, {opcodeConst, $op3, $op2, $op1, rvl},
+          {$_resultType, xlen, $op3->getType(), $op2->getType(), $op1->getType(), xlen});
+  }];
+}
+#endif // VCIX
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
new file mode 100644
index 0000000000000..95b66ee2b9a92
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.h
@@ -0,0 +1,20 @@
+//===- VCIXAttrs.h - VCIX Dialect Attribute Definition -*- C++ ----------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VCIX_TRANSFORMATTRS_H
+#define MLIR_DIALECT_VCIX_TRANSFORMATTRS_H
+
+#include "mlir/IR/Attributes.h"
+#include "mlir/IR/BuiltinAttributes.h"
+
+#include <cstdint>
+#include <optional>
+
+#include "mlir/Dialect/VCIX/VCIXDialectEnums.h.inc"
+
+#endif // MLIR_DIALECT_VCIX_TRANSFORMATTRS_H
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
new file mode 100644
index 0000000000000..7f3ecc9a3f2d3
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXAttrs.td
@@ -0,0 +1,91 @@
+//===- VCIXAttrs.td - VCIX dialect attributes ----*- tablegen -----------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef MLIR_DIALECT_VCIX_VCIXATTRS
+#define MLIR_DIALECT_VCIX_VCIXATTRS
+
+include "mlir/IR/EnumAttr.td"
+
+//===----------------------------------------------------------------------===//
+// VCIX helper type definitions
+//===----------------------------------------------------------------------===//
+// Rank-1 vector operand: either a fixed-length or an RVV scalable vector.
+def VectorOfRank1 : AnyTypeOf<[ScalableVectorOfRank<[1]>, VectorOfRank<[1]>]>;
+def VectorOfRank1OrScalar
+    : AnyTypeOf<[VectorOfRank1, I<64>, I<32>, F<16>, F<32>, F<64>, I<5>]>;
+// The VCIX opcode is 1 bit wide for the float-scalar forms and 2 bits
+// otherwise; RAttr carries a 5-bit register/immediate field.
+def OpcodeI1Attr : AnyIntegerAttrBase<AnyI<1>, "1-bit integer attribute">;
+def OpcodeI2Attr : AnyIntegerAttrBase<AnyI<2>, "2-bit integer attribute">;
+def OpcodeIntAttr : AnyAttrOf<[OpcodeI1Attr, OpcodeI2Attr]>;
+def RAttr : AnyAttrOf<[AnyIntegerAttrBase<AnyI<5>, "5-bit integer attribute">]>;
+def RVLType : AnyTypeOf<[UI<64>, UI<32>]>;
+
+// Special types for the intrinsic variants, where the integer attribute is
+// zero-extended to i32 or i64 depending on the XLEN of the target.
+def VectorOfRank1OrScalarIntrin
+    : AnyTypeOf<[VectorOfRank1, I<64>, I<32>, F<16>, F<32>, F<64>]>;
+def OpcodeIntrinIntAttr : AnyAttrOf<[I64Attr, I32Attr]>;
+def RIntrinAttr : AnyAttrOf<[I64Attr, I32Attr]>;
+def RVLIntrinType : AnyTypeOf<[I<64>, I<32>]>;
+
+def VCIX_e8mf8 : I32EnumAttrCase<"e8mf8", 0, "e8mf8">;
+def VCIX_e8mf4 : I32EnumAttrCase<"e8mf4", 1, "e8mf4">;
+def VCIX_e8mf2 : I32EnumAttrCase<"e8mf2", 2, "e8mf2">;
+def VCIX_e8m1  : I32EnumAttrCase<"e8m1",  3, "e8m1">;
+def VCIX_e8m2  : I32EnumAttrCase<"e8m2",  4, "e8m2">;
+def VCIX_e8m4  : I32EnumAttrCase<"e8m4",  5, "e8m4">;
+def VCIX_e8m8  : I32EnumAttrCase<"e8m8",  6, "e8m8">;
+
+def VCIX_e16mf4 : I32EnumAttrCase<"e16mf4", 7,  "e16mf4">;
+def VCIX_e16mf2 : I32EnumAttrCase<"e16mf2", 8,  "e16mf2">;
+def VCIX_e16m1  : I32EnumAttrCase<"e16m1",  9,  "e16m1">;
+def VCIX_e16m2  : I32EnumAttrCase<"e16m2",  10, "e16m2">;
+def VCIX_e16m4  : I32EnumAttrCase<"e16m4",  11,  "e16m4">;
+def VCIX_e16m8  : I32EnumAttrCase<"e16m8",  12,  "e16m8">;
+
+def VCIX_e32mf2 : I32EnumAttrCase<"e32mf2", 13, "e32mf2">;
+def VCIX_e32m1  : I32EnumAttrCase<"e32m1",  14, "e32m1">;
+def VCIX_e32m2  : I32EnumAttrCase<"e32m2",  15, "e32m2">;
+def VCIX_e32m4  : I32EnumAttrCase<"e32m4",  16, "e32m4">;
+def VCIX_e32m8  : I32EnumAttrCase<"e32m8",  17, "e32m8">;
+
+def VCIX_e64m1  : I32EnumAttrCase<"e64m1",  18, "e64m1">;
+def VCIX_e64m2  : I32EnumAttrCase<"e64m2",  19, "e64m2">;
+def VCIX_e64m4  : I32EnumAttrCase<"e64m4",  20, "e64m4">;
+def VCIX_e64m8  : I32EnumAttrCase<"e64m8",  21, "e64m8">;
+
+// Enumeration of all supported (SEW, LMUL) pairs; case names follow the RVV
+// `e<SEW>m<LMUL>` convention (e.g. e8mf8 = 8-bit elements at LMUL 1/8).
+def VCIX_SewLmulAttr : I32EnumAttr<"SewLmul",
+    "A list of all possible SEW and LMUL",
+    [
+      VCIX_e8mf8,
+      VCIX_e8mf4,
+      VCIX_e8mf2,
+      VCIX_e8m1,
+      VCIX_e8m2,
+      VCIX_e8m4,
+      VCIX_e8m8,
+
+      VCIX_e16mf4,
+      VCIX_e16mf2,
+      VCIX_e16m1,
+      VCIX_e16m2,
+      VCIX_e16m4,
+      VCIX_e16m8,
+
+      VCIX_e32mf2,
+      VCIX_e32m1,
+      VCIX_e32m2,
+      VCIX_e32m4,
+      VCIX_e32m8,
+
+      VCIX_e64m1,
+      VCIX_e64m2,
+      VCIX_e64m4,
+      VCIX_e64m8,
+    ]> {
+  let cppNamespace = "::mlir::vcix";
+}
+
+#endif // MLIR_DIALECT_VCIX_VCIXATTRS
diff --git a/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h b/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
new file mode 100644
index 0000000000000..0e795f42f58de
--- /dev/null
+++ b/mlir/include/mlir/Dialect/VCIX/VCIXDialect.h
@@ -0,0 +1,28 @@
+//===- VCIXDialect.h - MLIR Dialect for VCIX --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the VCIX dialect in MLIR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VCIX_VCIXDIALECT_H_
+#define MLIR_DIALECT_VCIX_VCIXDIALECT_H_
+
+#include "mlir/Bytecode/BytecodeOpInterface.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Dialect.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
+
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h.inc"
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/VCIX/VCIX.h.inc"
+
+#endif // MLIR_DIALECT_VCIX_VCIXDIALECT_H_
diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
index 19a62cadaa2e0..3dcb4a41e9d17 100644
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -83,6 +83,7 @@
 #include "mlir/Dialect/Transform/IR/TransformDialect.h"
 #include "mlir/Dialect/Transform/PDLExtension/PDLExtension.h"
 #include "mlir/Dialect/UB/IR/UBOps.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
 #include "mlir/Dialect/Vector/IR/VectorOps.h"
 #include "mlir/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.h"
 #include "mlir/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.h"
@@ -137,6 +138,7 @@ inline void registerAllDialects(DialectRegistry &registry) {
                   tosa::TosaDialect,
                   transform::TransformDialect,
                   ub::UBDialect,
+                  vcix::VCIXDialect,
                   vector::VectorDialect,
                   x86vector::X86VectorDialect>();
   // clang-format on
diff --git a/mlir/include/mlir/Target/LLVMIR/Dialect/All.h b/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
index 5dfc15afb7593..7739dc569437e 100644
--- a/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
+++ b/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
@@ -27,6 +27,7 @@
 #include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h"
 #include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h"
 #include "mlir/Target/LLVMIR/Dialect/SPIRV/SPIRVToLLVMIRTranslation.h"
+#include "mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h"
 #include "mlir/Target/LLVMIR/Dialect/X86Vector/X86VectorToLLVMIRTranslation.h"
 
 namespace mlir {
@@ -47,6 +48,7 @@ static inline void registerAllToLLVMIRTranslations(DialectRegistry &registry) {
   registerOpenMPDialectTranslation(registry);
   registerROCDLDialectTranslation(registry);
   registerSPIRVDialectTranslation(registry);
+  registerVCIXDialectTranslation(registry);
   registerX86VectorDialectTranslation(registry);
 
   // Extension required for translating GPU offloading Ops.
diff --git a/mlir/include/mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h b/mlir/include/mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h
new file mode 100644
index 0000000000000..bd15ab3dd93d5
--- /dev/null
+++ b/mlir/include/mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h
@@ -0,0 +1,31 @@
+//===- VCIXToLLVMIRTranslation.h - VCIX to LLVM IR --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides registration calls for VCIX dialect to LLVM IR translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_TARGET_LLVMIR_DIALECT_VCIX_VCIXTOLLVMIRTRANSLATION_H
+#define MLIR_TARGET_LLVMIR_DIALECT_VCIX_VCIXTOLLVMIRTRANSLATION_H
+
+namespace mlir {
+
+class DialectRegistry;
+class MLIRContext;
+
+/// Register the VCIX dialect and the translation from it to the LLVM IR in the
+/// given registry.
+void registerVCIXDialectTranslation(DialectRegistry &registry);
+
+/// Register the VCIX dialect and the translation from it in the registry
+/// associated with the given context.
+void registerVCIXDialectTranslation(MLIRContext &context);
+
+} // namespace mlir
+
+#endif // MLIR_TARGET_LLVMIR_DIALECT_VCIX_VCIXTOLLVMIRTRANSLATION_H
diff --git a/mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt b/mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt
index 5fbb50f62395e..0d9e86b7eb39c 100644
--- a/mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt
+++ b/mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt
@@ -19,6 +19,8 @@ add_mlir_conversion_library(MLIRVectorToLLVM
   MLIRMemRefDialect
   MLIRTargetLLVMIRExport
   MLIRTransforms
+  MLIRVCIXDialect
+  MLIRVCIXTransforms
   MLIRVectorDialect
   MLIRVectorTransforms
   )
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
index ff8e78a668e0f..387378b1c9d35 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
@@ -19,6 +19,8 @@
 #include "mlir/Dialect/Func/IR/FuncOps.h"
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/Dialect/VCIX/Transforms.h"
 #include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h"
 #include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
 #include "mlir/Dialect/X86Vector/Transforms.h"
@@ -53,6 +55,8 @@ struct LowerVectorToLLVMPass
       registry.insert<amx::AMXDialect>();
     if (x86Vector)
       registry.insert<x86vector::X86VectorDialect>();
+    if (vcix)
+      registry.insert<vcix::VCIXDialect>();
   }
   void runOnOperation() override;
 };
@@ -110,6 +114,10 @@ void LowerVectorToLLVMPass::runOnOperation() {
     configureX86VectorLegalizeForExportTarget(target);
     populateX86VectorLegalizeForLLVMExportPatterns(converter, patterns);
   }
+  if (vcix) {
+    configureVCIXLegalizeForExportTarget(target);
+    populateVCIXLegalizeForLLVMExportPatterns(converter, patterns);
+  }
 
   if (failed(
           applyPartialConversion(getOperation(), target, std::move(patterns))))
diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt
index 68776a695cac4..c1e3bd6998ef8 100644
--- a/mlir/lib/Dialect/CMakeLists.txt
+++ b/mlir/lib/Dialect/CMakeLists.txt
@@ -37,6 +37,7 @@ add_subdirectory(Tosa)
 add_subdirectory(Transform)
 add_subdirectory(UB)
 add_subdirectory(Utils)
+add_subdirectory(VCIX)
 add_subdirectory(Vector)
 add_subdirectory(X86Vector)
 
diff --git a/mlir/lib/Dialect/VCIX/CMakeLists.txt b/mlir/lib/Dialect/VCIX/CMakeLists.txt
new file mode 100644
index 0000000000000..9f57627c321fb
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_subdirectory(IR)
+add_subdirectory(Transforms)
diff --git a/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt b/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
new file mode 100644
index 0000000000000..063d76b04b884
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/CMakeLists.txt
@@ -0,0 +1,16 @@
+add_mlir_dialect_library(MLIRVCIXDialect
+  VCIXDialect.cpp
+  VCIXAttrs.cpp
+  VCIXOps.cpp
+
+  ADDITIONAL_HEADER_DIRS
+  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/VCIX
+
+  DEPENDS
+  MLIRVCIXIncGen
+
+  LINK_LIBS PUBLIC
+  MLIRIR
+  MLIRLLVMDialect
+  MLIRSideEffectInterfaces
+  )
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
new file mode 100644
index 0000000000000..d0562ae451ca1
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXAttrs.cpp
@@ -0,0 +1,12 @@
+//===- VCIXAttrs.cpp - VCIX Dialect Attribute Definitions -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/IR/BuiltinTypes.h"
+
+#include "mlir/Dialect/VCIX/VCIXDialectEnums.cpp.inc"
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
new file mode 100644
index 0000000000000..aa9260e1dbf6b
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXDialect.cpp
@@ -0,0 +1,28 @@
+//===- VCIXDialect.cpp - MLIR VCIX ops implementation ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the VCIX dialect and its operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/TypeUtilities.h"
+
+using namespace mlir;
+
+#include "mlir/Dialect/VCIX/VCIXDialect.cpp.inc"
+
+/// Register all VCIX operations (generated from VCIX.td) with the dialect.
+void vcix::VCIXDialect::initialize() {
+  addOperations<
+#define GET_OP_LIST
+#include "mlir/Dialect/VCIX/VCIX.cpp.inc"
+      >();
+}
diff --git a/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp b/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
new file mode 100644
index 0000000000000..7c1521d246b7d
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/IR/VCIXOps.cpp
@@ -0,0 +1,174 @@
+//===- VCIXOps.cpp - VCIX dialect operations ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/Dialect/VCIX/VCIXAttrs.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/Diagnostics.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/IR/TypeUtilities.h"
+#include "mlir/IR/Verifier.h"
+
+using namespace mlir;
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/VCIX/VCIX.cpp.inc"
+
+/// Return success if `opcodeAttr` is an IntegerAttr whose type is an integer
+/// of exactly `expectedBitSize` bits; failure otherwise (including when the
+/// attribute is not an IntegerAttr at all).
+static LogicalResult verifyOpcode(Attribute opcodeAttr,
+                                  const unsigned expectedBitSize) {
+  // Use the free-function form of dyn_cast; the member form is deprecated
+  // and the rest of this patch already uses the free-function casts.
+  if (auto intAttr = dyn_cast<IntegerAttr>(opcodeAttr))
+    return LogicalResult::success(intAttr.getType().isInteger(expectedBitSize));
+  return failure();
+}
+
+/// Return success if `to` is the "widened" counterpart of `from`: an integer
+/// of exactly twice the width, or the next wider float (f16->f32, f32->f64).
+static LogicalResult isWidenType(Type from, Type to) {
+  // Check both sides before comparing widths: the original unconditionally
+  // cast `to` to IntegerType, which asserts (crashes the verifier) when
+  // `from` is an integer but `to` is not. Fail gracefully instead so the
+  // caller can emit a proper diagnostic.
+  auto fromInt = dyn_cast<IntegerType>(from);
+  auto toInt = dyn_cast<IntegerType>(to);
+  if (fromInt && toInt)
+    return LogicalResult::success(2 * fromInt.getWidth() == toInt.getWidth());
+  if (isa<FloatType>(from)) {
+    if (from.isF16() && to.isF32())
+      return success();
+    if (from.isF32() && to.isF64())
+      return success();
+  }
+  return failure();
+}
+
+// Return success if type is a rank-1 vector; for scalable vectors,
+// additionally check that it encodes LMUL and SEW correctly
+// (LMUL = eltCount * SEW / 64 must not exceed 8), see
+// https://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html
+static LogicalResult verifyVectorType(Type t) {
+  auto vt = dyn_cast<VectorType>(t);
+  if (!vt || vt.getRank() != 1)
+    return failure();
+  // Fixed-length vectors are not constrained by the SEW/LMUL encoding.
+  if (!vt.isScalable())
+    return success();
+
+  Type eltTy = vt.getElementType();
+  unsigned sew = 0;
+  // f16 is accepted as well: the dialect declares F<16> operand types and
+  // e16 SEW/LMUL enum cases, so scalable f16 vectors must not be rejected.
+  if (eltTy.isF16())
+    sew = 16;
+  else if (eltTy.isF32())
+    sew = 32;
+  else if (eltTy.isF64())
+    sew = 64;
+  else if (auto intTy = dyn_cast<IntegerType>(eltTy))
+    sew = intTy.getWidth();
+  else
+    return failure();
+
+  // For a scalable vector the static count is per-vscale; 64 is the minimum
+  // RVV register width unit used by this encoding.
+  unsigned eltCount = vt.getShape()[0];
+  const unsigned lmul = eltCount * sew / 64;
+  return lmul > 8 ? failure() : success();
+}
+
+/// Shared verifier for vcix binary/ternary ops.
+/// Checks, in order: result type (when present) matches op2; op2 is an
+/// RVV-compatible rank-1 vector; `rvl` is absent for fixed vectors; a vector
+/// op1 matches op2; and the opcode width matches op1's kind (1-bit for float
+/// scalars, 2-bit otherwise). An i5 op1 must additionally be a constant.
+/// `result` is null for the read-only (no-result) op forms.
+template <typename OpT>
+static LogicalResult verifyVCIXOpCommon(OpT op, Value result) {
+  Type op1Type = op.getOp1().getType();
+  VectorType op2Type = op.getOp2().getType().template cast<VectorType>();
+  if (result && op2Type != result.getType())
+    return op.emitOpError("Result type does not match to op2 type");
+
+  if (failed(verifyVectorType(op2Type)))
+    return op.emitOpError(
+        "used type does not represent RVV-compatible scalable vector type");
+
+  // For fixed vectors the vector length is static, so a runtime `rvl` is
+  // meaningless and must not be provided.
+  if (!op2Type.isScalable() && op.getRvl())
+    return op.emitOpError(
+        "'rvl' must not be specified if operation is done on a "
+        "fixed vector type");
+
+  if (op1Type.isa<VectorType>() && op1Type != op2Type)
+    return op.emitOpError("op1 type does not match to op2 type");
+
+  // Floating-point scalar forms encode only a 1-bit opcode; all other forms
+  // (vector, integer scalar, immediate) use the 2-bit opcode.
+  if (op1Type.isa<FloatType>()) {
+    if (failed(verifyOpcode(op.getOpcodeAttr(), 1)))
+      return op.emitOpError(
+          "with a floating point scalar can only use 1-bit opcode");
+    return success();
+  }
+  if (failed(verifyOpcode(op.getOpcodeAttr(), 2)))
+    return op.emitOpError("must use 2-bit opcode");
+
+  // A 5-bit op1 is an immediate field and must come from a constant-like op.
+  if (op1Type.isInteger(5)) {
+    Operation *defOp = op.getOp1().getDefiningOp();
+    if (!defOp || !defOp->hasTrait<OpTrait::ConstantLike>())
+      return op.emitOpError("immediate operand must be a constant");
+    return success();
+  }
+  if (op1Type.isa<IntegerType>() && !op1Type.isInteger(32) &&
+      !op1Type.isInteger(64))
+    return op.emitOpError(
+        "non-constant integer first operand must be of a size 32 or 64");
+  return success();
+}
+
+/// Unary operations.
+LogicalResult vcix::UnaryROOp::verify() {
+  // Unary forms always carry a 2-bit opcode.
+  if (succeeded(verifyOpcode(getOpcodeAttr(), /*expectedBitSize=*/2)))
+    return success();
+  return emitOpError("must use 2-bit opcode");
+}
+
+LogicalResult vcix::UnaryOp::verify() {
+  // A 2-bit opcode and an RVV-compatible result vector are both required.
+  if (failed(verifyOpcode(getOpcodeAttr(), /*expectedBitSize=*/2)))
+    return emitOpError("must use 2-bit opcode");
+  if (succeeded(verifyVectorType(getResult().getType())))
+    return success();
+  return emitOpError(
+      "result type does not represent RVV-compatible scalable vector type");
+}
+
+/// Binary operations
+LogicalResult vcix::BinaryROOp::verify() {
+  // Read-only form has no result; pass nullptr so the common verifier skips
+  // the result/op2 type comparison.
+  return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::BinaryOp::verify() {
+  return verifyVCIXOpCommon(*this, getResult());
+}
+
+/// Ternary operations
+LogicalResult vcix::TernaryROOp::verify() {
+  VectorType op2Type = getOp2().getType().cast<VectorType>();
+  VectorType op3Type = getOp3().getType().cast<VectorType>();
+  // Non-widening ternary ops require op3 to match op2's vector type exactly.
+  if (op2Type != op3Type) {
+    return emitOpError("op3 type does not match to op2 type");
+  }
+  return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::TernaryOp::verify() {
+  VectorType op2Type = getOp2().getType().cast<VectorType>();
+  VectorType op3Type = getOp3().getType().cast<VectorType>();
+  if (op2Type != op3Type)
+    return emitOpError("op3 type does not match to op2 type");
+
+  return verifyVCIXOpCommon(*this, getResult());
+}
+
+/// Wide Ternary operations
+LogicalResult vcix::WideTernaryROOp::verify() {
+  VectorType op2Type = getOp2().getType().cast<VectorType>();
+  VectorType op3Type = getOp3().getType().cast<VectorType>();
+  // op3 is the wide accumulator: its elements must be the widened form of
+  // op2's (twice the integer width, or f16->f32 / f32->f64). The diagnostic
+  // names op3, not the result — the check compares op3 against op2.
+  if (failed(isWidenType(op2Type.getElementType(), op3Type.getElementType())))
+    return emitOpError("op3 type is not widened type of op2");
+
+  return verifyVCIXOpCommon(*this, nullptr);
+}
+
+LogicalResult vcix::WideTernaryOp::verify() {
+  VectorType op2Type = getOp2().getType().cast<VectorType>();
+  VectorType op3Type = getOp3().getType().cast<VectorType>();
+  if (failed(isWidenType(op2Type.getElementType(), op3Type.getElementType())))
+    return emitOpError("op3 type is not widened type of op2");
+
+  // Don't compare the result type for widened operations.
+  return verifyVCIXOpCommon(*this, nullptr);
+}
diff --git a/mlir/lib/Dialect/VCIX/Transforms/CMakeLists.txt b/mlir/lib/Dialect/VCIX/Transforms/CMakeLists.txt
new file mode 100644
index 0000000000000..5586f18195bfa
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/Transforms/CMakeLists.txt
@@ -0,0 +1,12 @@
+add_mlir_dialect_library(MLIRVCIXTransforms
+  LegalizeForLLVMExport.cpp
+
+  DEPENDS
+  MLIRVCIXConversionsIncGen
+
+  LINK_LIBS PUBLIC
+  MLIRVCIXDialect
+  MLIRIR
+  MLIRLLVMCommonConversion
+  MLIRLLVMDialect
+  )
diff --git a/mlir/lib/Dialect/VCIX/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/VCIX/Transforms/LegalizeForLLVMExport.cpp
new file mode 100644
index 0000000000000..1c5c196a9a0dc
--- /dev/null
+++ b/mlir/lib/Dialect/VCIX/Transforms/LegalizeForLLVMExport.cpp
@@ -0,0 +1,266 @@
+//===- LegalizeForLLVMExport.cpp - Prepare VCIX for LLVM translation ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
+#include "mlir/Conversion/LLVMCommon/Pattern.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/VCIX/Transforms.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/IR/BuiltinOps.h"
+#include "mlir/IR/PatternMatch.h"
+#include <string>
+
+using namespace mlir;
+
+static constexpr char kVCIXTargetFeaturesAttr[] = "vcix.target_features";
+
+// Get integer value from an attribute, zero-extended to `unsigned`, keeping
+// only the low bits of the attribute's declared type width.
+static unsigned getInteger(Attribute attr) {
+  auto intAttr = cast<IntegerAttr>(attr);
+  // Mask via APInt rather than `(1 << width) - 1`: the shift is undefined
+  // behavior once the attribute width reaches 32 (e.g. promoted I32/I64
+  // intrinsic attributes).
+  return static_cast<unsigned>(
+      intAttr.getValue().zextOrTrunc(32).getZExtValue());
+}
+
+/// Common machinery for the vcix -> vcix-intrinsic conversion patterns:
+/// XLEN discovery from the enclosing function's target features, attribute
+/// promotion to XLEN, and zero-extension of narrow scalar operands.
+template <typename SourceOp, typename DestOp>
+struct OneToOneWithPromotionBase : public ConvertOpToLLVMPattern<SourceOp> {
+  using ConvertOpToLLVMPattern<SourceOp>::ConvertOpToLLVMPattern;
+
+  /// Return the "vcix.target_features" StringAttr from the closest
+  /// function-like ancestor, or null when the attribute is absent.
+  StringAttr getTargetFeatures(Operation *op) const {
+    // Walk up to the function-like ancestor. Test for null before isa<>:
+    // the original called isa<> on the parent first, which asserts on a
+    // null pointer before the intended llvm_unreachable could fire.
+    Operation *func = op->getParentOp();
+    while (func && !isa<FunctionOpInterface>(func))
+      func = func->getParentOp();
+    if (!func)
+      llvm_unreachable("Cannot find function-like operation in parents");
+
+    const DictionaryAttr dictAttr = func->getAttrDictionary();
+    if (auto targetFeatures = dictAttr.getNamed(kVCIXTargetFeaturesAttr))
+      return cast<StringAttr>(targetFeatures->getValue());
+    return nullptr;
+  }
+
+  /// Determine XLEN from the target features; defaults to 64 when no
+  /// features are attached.
+  unsigned getXLen(Operation *op) const {
+    StringAttr targetFeatures = getTargetFeatures(op);
+    if (!targetFeatures)
+      return 64;
+
+    if (targetFeatures.getValue().contains("+32bit"))
+      return 32;
+
+    if (targetFeatures.getValue().contains("+64bit"))
+      return 64;
+
+    llvm_unreachable("Unsupported RISC-V target");
+  }
+
+  explicit OneToOneWithPromotionBase(LLVMTypeConverter &converter)
+      : mlir::ConvertOpToLLVMPattern<SourceOp>(converter) {}
+
+  /// Return new IntegerAttr with a value promoted to xlen if necessary.
+  IntegerAttr promoteIntAttr(ConversionPatternRewriter &rewriter,
+                             Attribute attr, const unsigned xlen) const {
+    Type xlenType = rewriter.getIntegerType(xlen);
+    return rewriter.getIntegerAttr(xlenType, getInteger(attr));
+  }
+
+  /// Convert all operands to the types required for legalization: a scalar
+  /// integer first operand narrower than XLEN is zero-extended to XLEN.
+  FailureOr<SmallVector<Value>>
+  convertOperands(ConversionPatternRewriter &rewriter, ValueRange operands,
+                  const unsigned xlen) const {
+    SmallVector<Value> res(operands);
+    Value op1 = operands.front();
+    if (auto intType = dyn_cast<IntegerType>(op1.getType()))
+      if (intType.getWidth() < xlen) {
+        Value zext = rewriter.create<LLVM::ZExtOp>(
+            op1.getLoc(), rewriter.getIntegerType(xlen), op1);
+        res[0] = zext;
+      }
+    return res;
+  }
+};
+
+/// Convert a read-only unary vcix operation into its intrinsic version, with
+/// promotion of the opcode, rd and rs2 attributes to XLEN.
+template <typename SourceOp, typename DestOp>
+struct OneToOneUnaryROWithPromotion
+    : public OneToOneWithPromotionBase<SourceOp, DestOp> {
+  using OneToOneWithPromotionBase<SourceOp, DestOp>::OneToOneWithPromotionBase;
+
+  explicit OneToOneUnaryROWithPromotion(LLVMTypeConverter &converter)
+      : OneToOneWithPromotionBase<SourceOp, DestOp>(converter) {}
+
+  LogicalResult
+  matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const final {
+    const unsigned xlen = this->getXLen(op);
+    FailureOr<SmallVector<Value>> operands =
+        this->convertOperands(rewriter, adaptor.getOperands(), xlen);
+    if (failed(operands))
+      return failure();
+
+    // The read-only form produces no results.
+    Operation *newOp = rewriter.create(
+        op->getLoc(), rewriter.getStringAttr(DestOp::getOperationName()),
+        *operands, {}, op->getAttrs());
+    // The op was created under DestOp's registered name, so cast<> is
+    // appropriate; the previous unchecked dyn_cast would just crash later.
+    DestOp dstOp = cast<DestOp>(newOp);
+    // Use promoteIntAttr for consistency with the sibling patterns.
+    dstOp.setOpcodeAttr(
+        this->promoteIntAttr(rewriter, dstOp.getOpcodeAttr(), xlen));
+    dstOp.setRs2Attr(this->promoteIntAttr(rewriter, dstOp.getRs2Attr(), xlen));
+    dstOp.setRdAttr(this->promoteIntAttr(rewriter, dstOp.getRdAttr(), xlen));
+    rewriter.eraseOp(op);
+    return success();
+  }
+};
+
+/// Convert a unary vcix operation into its intrinsic version, with promotion
+/// of the opcode and rs2 attributes to XLEN.
+template <typename SourceOp, typename DestOp>
+struct OneToOneUnaryWithPromotion
+    : public OneToOneWithPromotionBase<SourceOp, DestOp> {
+  using OneToOneWithPromotionBase<SourceOp, DestOp>::OneToOneWithPromotionBase;
+
+  explicit OneToOneUnaryWithPromotion(LLVMTypeConverter &converter)
+      : OneToOneWithPromotionBase<SourceOp, DestOp>(converter) {}
+
+  LogicalResult
+  matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const final {
+    const unsigned xlen = this->getXLen(op);
+    FailureOr<SmallVector<Value>> operands =
+        this->convertOperands(rewriter, adaptor.getOperands(), xlen);
+    if (failed(operands))
+      return failure();
+
+    Operation *newOp = rewriter.create(
+        op->getLoc(), rewriter.getStringAttr(DestOp::getOperationName()),
+        *operands, op->getResultTypes(), op->getAttrs());
+    // Created under DestOp's registered name; cast<> instead of an
+    // unchecked dyn_cast.
+    DestOp dstOp = cast<DestOp>(newOp);
+    dstOp.setOpcodeAttr(
+        this->promoteIntAttr(rewriter, dstOp.getOpcodeAttr(), xlen));
+    dstOp.setRs2Attr(this->promoteIntAttr(rewriter, dstOp.getRs2Attr(), xlen));
+    rewriter.replaceOp(op, newOp);
+    return success();
+  }
+};
+
+/// Convert a read-only binary vcix operation into its intrinsic version, with
+/// promotion of the opcode and rd attributes to XLEN.
+template <typename SourceOp, typename DestOp>
+struct OneToOneBinaryROWithPromotion
+    : public OneToOneWithPromotionBase<SourceOp, DestOp> {
+  using OneToOneWithPromotionBase<SourceOp, DestOp>::OneToOneWithPromotionBase;
+
+  explicit OneToOneBinaryROWithPromotion(LLVMTypeConverter &converter)
+      : OneToOneWithPromotionBase<SourceOp, DestOp>(converter) {}
+
+  LogicalResult
+  matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const final {
+    const unsigned xlen = this->getXLen(op);
+    FailureOr<SmallVector<Value>> operands =
+        this->convertOperands(rewriter, adaptor.getOperands(), xlen);
+    if (failed(operands))
+      return failure();
+
+    // The read-only form produces no results.
+    Operation *newOp = rewriter.create(
+        op->getLoc(), rewriter.getStringAttr(DestOp::getOperationName()),
+        *operands, {}, op->getAttrs());
+    // Created under DestOp's registered name; cast<> instead of an
+    // unchecked dyn_cast.
+    DestOp dstOp = cast<DestOp>(newOp);
+    dstOp.setOpcodeAttr(
+        this->promoteIntAttr(rewriter, dstOp.getOpcodeAttr(), xlen));
+    dstOp.setRdAttr(this->promoteIntAttr(rewriter, dstOp.getRdAttr(), xlen));
+
+    rewriter.eraseOp(op);
+    return success();
+  }
+};
+
+/// Convert a vcix operation into its intrinsic version, with promotion of the
+/// opcode attribute to XLEN. Handles both the result-producing and the
+/// read-only (no-result) forms.
+template <typename SourceOp, typename DestOp>
+struct OneToOneWithPromotion
+    : public OneToOneWithPromotionBase<SourceOp, DestOp> {
+  using OneToOneWithPromotionBase<SourceOp, DestOp>::OneToOneWithPromotionBase;
+
+  explicit OneToOneWithPromotion(LLVMTypeConverter &converter)
+      : OneToOneWithPromotionBase<SourceOp, DestOp>(converter) {}
+
+  LogicalResult
+  matchAndRewrite(SourceOp op, typename SourceOp::Adaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const final {
+    const unsigned xlen = this->getXLen(op);
+    FailureOr<SmallVector<Value>> operands =
+        this->convertOperands(rewriter, adaptor.getOperands(), xlen);
+    if (failed(operands))
+      return failure();
+
+    Operation *newOp = rewriter.create(
+        op->getLoc(), rewriter.getStringAttr(DestOp::getOperationName()),
+        *operands, op->getResultTypes(), op->getAttrs());
+    // Created under DestOp's registered name; cast<> instead of an
+    // unchecked dyn_cast.
+    DestOp dstOp = cast<DestOp>(newOp);
+    dstOp.setOpcodeAttr(
+        this->promoteIntAttr(rewriter, dstOp.getOpcodeAttr(), xlen));
+
+    // Read-only ops have no results to replace.
+    if (op->getResultTypes().empty())
+      rewriter.eraseOp(op);
+    else
+      rewriter.replaceOp(op, newOp);
+
+    return success();
+  }
+};
+
+/// Populate the given list with patterns that convert from VCIX to LLVM.
+/// Each user-facing vcix op maps 1:1 onto its "Intrin" counterpart, with
+/// integer attributes promoted to XLEN by the pattern bases above.
+void mlir::populateVCIXLegalizeForLLVMExportPatterns(
+    LLVMTypeConverter &converter, RewritePatternSet &patterns) {
+  // Populate conversion patterns
+  patterns.add<
+      OneToOneUnaryWithPromotion<vcix::UnaryOp, vcix::UnaryIntrinOp>,
+      OneToOneUnaryROWithPromotion<vcix::UnaryROOp, vcix::UnaryIntrinROOp>,
+      OneToOneBinaryROWithPromotion<vcix::BinaryROOp, vcix::BinaryIntrinROOp>,
+      OneToOneWithPromotion<vcix::BinaryOp, vcix::BinaryIntrinOp>,
+      OneToOneWithPromotion<vcix::TernaryOp, vcix::TernaryIntrinOp>,
+      OneToOneWithPromotion<vcix::TernaryROOp, vcix::TernaryIntrinROOp>,
+      OneToOneWithPromotion<vcix::WideTernaryOp, vcix::WideTernaryIntrinOp>,
+      OneToOneWithPromotion<vcix::WideTernaryROOp,
+                            vcix::WideTernaryIntrinROOp>>(converter);
+}
+
+/// Mark the intrinsic-level vcix ops (and the auxiliary LLVM ops that
+/// legalization may emit) as legal, and the user-facing vcix ops as illegal
+/// so the conversion driver rewrites them.
+void mlir::configureVCIXLegalizeForExportTarget(LLVMConversionTarget &target) {
+  // During legalization some operation may zext operands to simplify conversion
+  // to LLVM IR later
+  // clang-format off
+  target.addLegalOp<LLVM::ZExtOp,
+                    LLVM::UndefOp,
+                    LLVM::vector_extract,
+                    LLVM::vector_insert,
+                    LLVM::BitcastOp>();
+  target.addLegalOp<vcix::UnaryIntrinOp,
+                    vcix::UnaryIntrinROOp,
+                    vcix::BinaryIntrinOp,
+                    vcix::BinaryIntrinROOp,
+                    vcix::TernaryIntrinOp,
+                    vcix::TernaryIntrinROOp,
+                    vcix::WideTernaryIntrinOp,
+                    vcix::WideTernaryIntrinROOp>();
+  target.addIllegalOp<vcix::UnaryOp,
+                      vcix::UnaryROOp,
+                      vcix::BinaryOp,
+                      vcix::BinaryROOp,
+                      vcix::TernaryOp,
+                      vcix::TernaryROOp,
+                      vcix::WideTernaryOp,
+                      vcix::WideTernaryROOp>();
+  // clang-format on
+}
diff --git a/mlir/lib/Target/LLVMIR/CMakeLists.txt b/mlir/lib/Target/LLVMIR/CMakeLists.txt
index 531c15a8703e9..05237a84d9106 100644
--- a/mlir/lib/Target/LLVMIR/CMakeLists.txt
+++ b/mlir/lib/Target/LLVMIR/CMakeLists.txt
@@ -59,6 +59,7 @@ add_mlir_translation_library(MLIRToLLVMIRTranslationRegistration
   MLIROpenMPToLLVMIRTranslation
   MLIRROCDLToLLVMIRTranslation
   MLIRSPIRVToLLVMIRTranslation
+  MLIRVCIXToLLVMIRTranslation
   )
 
 add_mlir_translation_library(MLIRTargetLLVMIRImport
diff --git a/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt b/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
index c9d916d8a5d82..a88e8b1fd8338 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
+++ b/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
@@ -10,4 +10,5 @@ add_subdirectory(OpenACC)
 add_subdirectory(OpenMP)
 add_subdirectory(ROCDL)
 add_subdirectory(SPIRV)
+add_subdirectory(VCIX)
 add_subdirectory(X86Vector)
diff --git a/mlir/lib/Target/LLVMIR/Dialect/VCIX/CMakeLists.txt b/mlir/lib/Target/LLVMIR/Dialect/VCIX/CMakeLists.txt
new file mode 100644
index 0000000000000..8b4af66810f9e
--- /dev/null
+++ b/mlir/lib/Target/LLVMIR/Dialect/VCIX/CMakeLists.txt
@@ -0,0 +1,17 @@
+add_mlir_translation_library(MLIRVCIXToLLVMIRTranslation
+  VCIXToLLVMIRTranslation.cpp
+
+  DEPENDS
+  MLIRVCIXConversionsIncGen
+
+  LINK_COMPONENTS
+  Core
+
+  LINK_LIBS PUBLIC
+  MLIRDialectUtils
+  MLIRIR
+  MLIRLLVMDialect
+  MLIRVCIXDialect
+  MLIRSupport
+  MLIRTargetLLVMIRExport
+  )
diff --git a/mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp
new file mode 100644
index 0000000000000..c04e229713902
--- /dev/null
+++ b/mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp
@@ -0,0 +1,215 @@
+//===- VCIXToLLVMIRTranslation.cpp - Translate VCIX to LLVM IR ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a translation between the VCIX dialect and
+// LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h"
+#include "mlir/Dialect/Utils/StaticValueUtils.h"
+#include "mlir/Dialect/VCIX/VCIXDialect.h"
+#include "mlir/IR/Operation.h"
+#include "mlir/Support/LogicalResult.h"
+#include "mlir/Target/LLVMIR/ModuleTranslation.h"
+
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
+
+using namespace mlir;
+using namespace mlir::LLVM;
+using mlir::LLVM::detail::createIntrinsicCall;
+using mlir::LLVM::detail::getLLVMConstant;
+
+/// Return the ID of the unary intrinsic that produces a vector result.
+static llvm::Intrinsic::ID getUnaryIntrinsicId(Type opType,
+                                               VectorType vecType) {
+  return opType.isInteger(5) ? llvm::Intrinsic::riscv_sf_vc_v_i
+                             : llvm::Intrinsic::riscv_sf_vc_v_x;
+}
+
+/// Return the ID of the unary intrinsic that does not produce a vector result.
+static llvm::Intrinsic::ID getUnaryROIntrinsicId(vcix::SewLmul sewLmul,
+                                                 Type opType) {
+  switch (sewLmul) {
+#define SEW_LMUL_TO_INTRIN(SEW_LMUL)                                           \
+  case vcix::SewLmul::SEW_LMUL:                                                \
+    return opType.isInteger(5) ? llvm::Intrinsic::riscv_sf_vc_x_se_##SEW_LMUL  \
+                               : llvm::Intrinsic::riscv_sf_vc_i_se_##SEW_LMUL;
+
+    SEW_LMUL_TO_INTRIN(e8mf8);
+    SEW_LMUL_TO_INTRIN(e8mf4);
+    SEW_LMUL_TO_INTRIN(e8mf2);
+    SEW_LMUL_TO_INTRIN(e8m1);
+    SEW_LMUL_TO_INTRIN(e8m2);
+    SEW_LMUL_TO_INTRIN(e8m4);
+    SEW_LMUL_TO_INTRIN(e8m8);
+
+    SEW_LMUL_TO_INTRIN(e16mf4);
+    SEW_LMUL_TO_INTRIN(e16mf2);
+    SEW_LMUL_TO_INTRIN(e16m1);
+    SEW_LMUL_TO_INTRIN(e16m2);
+    SEW_LMUL_TO_INTRIN(e16m4);
+    SEW_LMUL_TO_INTRIN(e16m8);
+
+    SEW_LMUL_TO_INTRIN(e32mf2);
+    SEW_LMUL_TO_INTRIN(e32m1);
+    SEW_LMUL_TO_INTRIN(e32m2);
+    SEW_LMUL_TO_INTRIN(e32m4);
+    SEW_LMUL_TO_INTRIN(e32m8);
+
+    SEW_LMUL_TO_INTRIN(e64m1);
+    SEW_LMUL_TO_INTRIN(e64m2);
+    SEW_LMUL_TO_INTRIN(e64m4);
+    SEW_LMUL_TO_INTRIN(e64m8);
+  }
+  llvm_unreachable("unknown redux kind");
+}
+
+/// Return the ID of the binary intrinsic that produces a vector result.
+static llvm::Intrinsic::ID getBinaryIntrinsicId(Type opType) {
+  if (auto intTy = opType.dyn_cast<IntegerType>())
+    return intTy.getWidth() == 5 ? llvm::Intrinsic::riscv_sf_vc_v_iv_se
+                                 : llvm::Intrinsic::riscv_sf_vc_v_xv_se;
+
+  if (opType.isa<FloatType>())
+    return llvm::Intrinsic::riscv_sf_vc_v_fv_se;
+
+  assert(opType.isa<VectorType>() &&
+         "First operand should either be imm, float or vector ");
+  return llvm::Intrinsic::riscv_sf_vc_v_vv_se;
+}
+
+/// Return the ID of the binary intrinsic that does not produce a vector result.
+static llvm::Intrinsic::ID getBinaryROIntrinsicId(Type opType) {
+  if (auto intTy = opType.dyn_cast<IntegerType>())
+    return intTy.getWidth() == 5 ? llvm::Intrinsic::riscv_sf_vc_iv_se
+                                 : llvm::Intrinsic::riscv_sf_vc_xv_se;
+
+  if (opType.isa<FloatType>())
+    return llvm::Intrinsic::riscv_sf_vc_fv_se;
+
+  assert(opType.isa<VectorType>() &&
+         "First operand should either be imm, float or vector ");
+  return llvm::Intrinsic::riscv_sf_vc_vv_se;
+}
+
+/// Return the ID of the ternary intrinsic that produces a vector result.
+static llvm::Intrinsic::ID getTernaryIntrinsicId(Type opType) {
+  if (auto intTy = opType.dyn_cast<IntegerType>())
+    return intTy.getWidth() == 5 ? llvm::Intrinsic::riscv_sf_vc_v_ivv_se
+                                 : llvm::Intrinsic::riscv_sf_vc_v_xvv_se;
+
+  if (opType.isa<FloatType>())
+    return llvm::Intrinsic::riscv_sf_vc_v_fvv_se;
+
+  assert(opType.isa<VectorType>() &&
+         "First operand should either be imm, float or vector ");
+  return llvm::Intrinsic::riscv_sf_vc_v_vvv_se;
+}
+
+/// Return the ID of the ternary intrinsic that does not produce a vector result.
+static llvm::Intrinsic::ID getTernaryROIntrinsicId(Type opType) {
+  if (auto intTy = opType.dyn_cast<IntegerType>())
+    return intTy.getWidth() == 5 ? llvm::Intrinsic::riscv_sf_vc_ivv_se
+                                 : llvm::Intrinsic::riscv_sf_vc_xvv_se;
+
+  if (opType.isa<FloatType>())
+    return llvm::Intrinsic::riscv_sf_vc_fvv_se;
+
+  assert(opType.isa<VectorType>() &&
+         "First operand should either be imm, float or vector ");
+  return llvm::Intrinsic::riscv_sf_vc_vvv_se;
+}
+
+/// Return the ID of the wide ternary intrinsic that produces a vector result.
+static llvm::Intrinsic::ID getWideTernaryIntrinsicId(Type opType) {
+  if (auto intTy = opType.dyn_cast<IntegerType>())
+    return intTy.getWidth() == 5 ? llvm::Intrinsic::riscv_sf_vc_v_ivw_se
+                                 : llvm::Intrinsic::riscv_sf_vc_v_xvw_se;
+
+  if (opType.isa<FloatType>())
+    return llvm::Intrinsic::riscv_sf_vc_v_fvw_se;
+
+  assert(opType.isa<VectorType>() &&
+         "First operand should either be imm, float or vector ");
+  return llvm::Intrinsic::riscv_sf_vc_v_vvw_se;
+}
+
+/// Return the ID of the wide ternary intrinsic that does not produce a vector result.
+static llvm::Intrinsic::ID getWideTernaryROIntrinsicId(Type opType) {
+  if (auto intTy = opType.dyn_cast<IntegerType>())
+    return intTy.getWidth() == 5 ? llvm::Intrinsic::riscv_sf_vc_ivw_se
+                                 : llvm::Intrinsic::riscv_sf_vc_xvw_se;
+
+  if (opType.isa<FloatType>())
+    return llvm::Intrinsic::riscv_sf_vc_fvw_se;
+
+  assert(opType.isa<VectorType>() &&
+         "First operand should either be imm, float or vector ");
+  return llvm::Intrinsic::riscv_sf_vc_vvw_se;
+}
+
+/// Return the RVL operand for a VCIX intrinsic. If rvl is already set, return
+/// it; otherwise construct a constant from the fixed vector type's length.
+static llvm::Value *convertRvl(llvm::IRBuilderBase &builder, llvm::Value *rvl,
+                               VectorType vtype, llvm::Type *xlen, Location loc,
+                               LLVM::ModuleTranslation &moduleTranslation) {
+  if (rvl) {
+    assert(vtype.isScalable() &&
+           "rvl parameter must be set for scalable vectors");
+    return builder.CreateZExtOrTrunc(rvl, xlen);
+  }
+
+  assert(vtype.getRank() == 1 && "Only 1-d fixed vectors are supported");
+  return getLLVMConstant(
+      xlen,
+      IntegerAttr::get(IntegerType::get(&moduleTranslation.getContext(), 64),
+                       vtype.getShape()[0]),
+      loc, moduleTranslation);
+}
+
+/// Infer the XLEN width from the opcode attribute's type. This avoids having
+/// to pass target options around.
+static unsigned getXlenFromOpcode(Attribute opcodeAttr) {
+  auto intAttr = opcodeAttr.cast<IntegerAttr>();
+  return intAttr.getType().cast<IntegerType>().getWidth();
+}
+
+namespace {
+/// Implementation of the dialect interface that converts operations belonging
+/// to the VCIX dialect to LLVM IR.
+class VCIXDialectLLVMIRTranslationInterface
+    : public LLVMTranslationDialectInterface {
+public:
+  using LLVMTranslationDialectInterface::LLVMTranslationDialectInterface;
+
+  /// Translates the given operation to LLVM IR using the provided IR builder
+  /// and saving the state in `moduleTranslation`.
+  LogicalResult
+  convertOperation(Operation *op, llvm::IRBuilderBase &builder,
+                   LLVM::ModuleTranslation &moduleTranslation) const final {
+    Operation &opInst = *op;
+#include "mlir/Dialect/VCIX/VCIXConversions.inc"
+    return failure();
+  }
+};
+} // namespace
+
+void mlir::registerVCIXDialectTranslation(DialectRegistry &registry) {
+  registry.insert<vcix::VCIXDialect>();
+  registry.addExtension(+[](MLIRContext *ctx, vcix::VCIXDialect *dialect) {
+    dialect->addInterfaces<VCIXDialectLLVMIRTranslationInterface>();
+  });
+}
+
+void mlir::registerVCIXDialectTranslation(MLIRContext &context) {
+  DialectRegistry registry;
+  registerVCIXDialectTranslation(registry);
+  context.appendDialectRegistry(registry);
+}
diff --git a/mlir/test/Dialect/VCIX/invalid.mlir b/mlir/test/Dialect/VCIX/invalid.mlir
new file mode 100644
index 0000000000000..65c783dc85200
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/invalid.mlir
@@ -0,0 +1,57 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+// -----
+func.func @unary_e8mf2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  // expected-error at +1 {{must use 2-bit opcode}}
+  vcix.unary.ro e8mf2 %const, %rvl { opcode = 1 : i1, rs2 = 30 : i5, rd = 31 : i5 } : (i32, ui32)
+  return
+}
+// -----
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  // expected-error at +1 {{with a floating point scalar can only use 1-bit opcode}}
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  // expected-error at +1 {{with a floating point scalar can only use 1-bit opcode}}
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+  // expected-error at +1 {{with a floating point scalar can only use 1-bit opcode}}
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 2 : i2 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+// -----
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  // expected-error at +1 {{result type is not widened type of op2}}
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @binary_fv_wrong_vtype(%op1: f32, %op2 : vector<[32] x f32>, %rvl : ui32) -> vector<[32] x f32> {
+  // expected-error at +1 {{used type does not represent RVV-compatible scalable vector type}}
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[32] x f32>, ui32) -> vector<[32] x f32>
+  return %0 : vector<[32] x f32>
+}
+
+// -----
+func.func @binary_fv_vls_rvl(%op1: f32, %op2 : vector<4 x f32>, %rvl : ui32) -> vector<4 x f32> {
+  // expected-error at +1 {{'rvl' must not be specified if operation is done on a fixed vector type}}
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<4 x f32>, ui32) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @binary_nonconst(%val: i5, %op: vector<[4] x f32>, %rvl: ui32) {
+  // expected-error at +1 {{immediate operand must be a constant}}
+  vcix.binary.ro %val, %op, %rvl { opcode = 1 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui32)
+  return
+}
diff --git a/mlir/test/Dialect/VCIX/legalize-for-llvm-rv32.mlir b/mlir/test/Dialect/VCIX/legalize-for-llvm-rv32.mlir
new file mode 100644
index 0000000000000..175ed62c49dc4
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/legalize-for-llvm-rv32.mlir
@@ -0,0 +1,1071 @@
+// RUN: mlir-opt %s -convert-vector-to-llvm="enable-vcix" -convert-func-to-llvm -reconcile-unrealized-casts | FileCheck %s
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_ro_e8mf8(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 0 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8mf8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8mf4(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 1 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8mf4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8mf2(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 2 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8mf2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8m1(
+// CHECK-SAME:                             %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 3 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8m1(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8m2(
+// CHECK-SAME:                             %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 4 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8m2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8m4(
+// CHECK-SAME:                             %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 5 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8m4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8m8(
+// CHECK-SAME:                             %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 6 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8m8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_e8mf8(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[1]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[1]xi8>
+// CHECK:         }
+func.func @unary_e8mf8(%rvl: ui32) -> vector<[1] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i8>
+  return %0 : vector<[1] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8mf4(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[2]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[2]xi8>
+// CHECK:         }
+func.func @unary_e8mf4(%rvl: ui32) -> vector<[2] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i8>
+  return %0 : vector<[2] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8mf2(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[4]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xi8>
+// CHECK:         }
+func.func @unary_e8mf2(%rvl: ui32) -> vector<[4] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i8>
+  return %0 : vector<[4] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8m1(
+// CHECK-SAME:                          %[[VAL_0:.*]]: i32) -> vector<[8]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[8]xi8>
+// CHECK:         }
+func.func @unary_e8m1(%rvl: ui32) -> vector<[8] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i8>
+  return %0 : vector<[8] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8m2(
+// CHECK-SAME:                          %[[VAL_0:.*]]: i32) -> vector<[16]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[16]xi8>
+// CHECK:         }
+func.func @unary_e8m2(%rvl: ui32) -> vector<[16] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i8>
+  return %0 : vector<[16] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8m4(
+// CHECK-SAME:                          %[[VAL_0:.*]]: i32) -> vector<[32]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[32]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[32]xi8>
+// CHECK:         }
+func.func @unary_e8m4(%rvl: ui32) -> vector<[32] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i8>
+  return %0 : vector<[32] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8m8(
+// CHECK-SAME:                          %[[VAL_0:.*]]: i32) -> vector<[64]xi8> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[64]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[64]xi8>
+// CHECK:         }
+func.func @unary_e8m8(%rvl: ui32) -> vector<[64] x i8> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[64] x i8>
+  return %0 : vector<[64] x i8>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_ro_e16mf4(
+// CHECK-SAME:                               %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 7 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16mf4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16mf2(
+// CHECK-SAME:                               %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 8 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16mf2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16m1(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 9 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16m1(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16m2(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 10 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16m2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16m4(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 11 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16m4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16m8(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 12 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16m8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_e16mf4(
+// CHECK-SAME:                            %[[VAL_0:.*]]: i32) -> vector<[1]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[1]xi16>
+// CHECK:         }
+func.func @unary_e16mf4(%rvl: ui32) -> vector<[1] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i16>
+  return %0 : vector<[1] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16mf2(
+// CHECK-SAME:                            %[[VAL_0:.*]]: i32) -> vector<[2]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[2]xi16>
+// CHECK:         }
+func.func @unary_e16mf2(%rvl: ui32) -> vector<[2] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i16>
+  return %0 : vector<[2] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16m1(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[4]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xi16>
+// CHECK:         }
+func.func @unary_e16m1(%rvl: ui32) -> vector<[4] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i16>
+  return %0 : vector<[4] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16m2(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[8]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[8]xi16>
+// CHECK:         }
+func.func @unary_e16m2(%rvl: ui32) -> vector<[8] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i16>
+  return %0 : vector<[8] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16m4(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[16]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[16]xi16>
+// CHECK:         }
+// unary, e16/m4: result is vector<[16] x i16>.
+func.func @unary_e16m4(%rvl: ui32) -> vector<[16] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i16>
+  return %0 : vector<[16] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16m8(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[32]xi16> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[32]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[32]xi16>
+// CHECK:         }
+// unary, e16/m8: result is vector<[32] x i16> (largest LMUL in this group).
+func.func @unary_e16m8(%rvl: ui32) -> vector<[32] x i16> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i16>
+  return %0 : vector<[32] x i16>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_ro_e32mf2(
+// CHECK-SAME:                               %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 13 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e32mf2: no SSA result; rd = 30 carried as attribute. CHECK shows sew_lmul = 13.
+func.func @unary_ro_e32mf2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e32m1(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 14 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e32m1: CHECK shows sew_lmul = 14 in the lowered intrinsic.
+func.func @unary_ro_e32m1(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e32m2(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 15 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e32m2: CHECK shows sew_lmul = 15.
+func.func @unary_ro_e32m2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e32m4(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 16 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e32m4: CHECK shows sew_lmul = 16.
+func.func @unary_ro_e32m4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e32m8(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 17 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e32m8: CHECK shows sew_lmul = 17.
+func.func @unary_ro_e32m8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_e32mf2(
+// CHECK-SAME:                            %[[VAL_0:.*]]: i32) -> vector<[1]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[1]xi32>
+// CHECK:         }
+// unary, e32/mf2: fractional LMUL — result is vector<[1] x i32>.
+func.func @unary_e32mf2(%rvl: ui32) -> vector<[1] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i32>
+  return %0 : vector<[1] x i32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e32m1(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[2]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[2]xi32>
+// CHECK:         }
+// unary, e32/m1: result is vector<[2] x i32>.
+func.func @unary_e32m1(%rvl: ui32) -> vector<[2] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i32>
+  return %0 : vector<[2] x i32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e32m2(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[4]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xi32>
+// CHECK:         }
+// unary, e32/m2: result is vector<[4] x i32>.
+func.func @unary_e32m2(%rvl: ui32) -> vector<[4] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i32>
+  return %0 : vector<[4] x i32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e32m4(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[8]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[8]xi32>
+// CHECK:         }
+// unary, e32/m4: result is vector<[8] x i32>.
+func.func @unary_e32m4(%rvl: ui32) -> vector<[8] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i32>
+  return %0 : vector<[8] x i32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e32m8(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[16]xi32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[16]xi32>
+// CHECK:         }
+// unary, e32/m8: result is vector<[16] x i32>.
+func.func @unary_e32m8(%rvl: ui32) -> vector<[16] x i32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i32>
+  return %0 : vector<[16] x i32>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_ro_e64m1(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 18 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e64m1: CHECK shows sew_lmul = 18 (note: no e64mf* case — e64 starts at m1).
+func.func @unary_ro_e64m1(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e64m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e64m2(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 19 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e64m2: CHECK shows sew_lmul = 19.
+func.func @unary_ro_e64m2(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e64m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e64m4(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 20 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e64m4: CHECK shows sew_lmul = 20.
+func.func @unary_ro_e64m4(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e64m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e64m8(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 21 : i32}> : (i32, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// unary.ro, e64m8: CHECK shows sew_lmul = 21 (last value in the sew_lmul enumeration tested here).
+func.func @unary_ro_e64m8(%rvl: ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e64m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_e64m1(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[1]xi64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi64>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[1]xi64>
+// CHECK:         }
+// unary, e64/m1: result is vector<[1] x i64>.
+func.func @unary_e64m1(%rvl: ui32) -> vector<[1] x i64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i64>
+  return %0 : vector<[1] x i64>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e64m2(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[2]xi64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi64>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[2]xi64>
+// CHECK:         }
+// unary, e64/m2: result is vector<[2] x i64>.
+func.func @unary_e64m2(%rvl: ui32) -> vector<[2] x i64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i64>
+  return %0 : vector<[2] x i64>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e64m4(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[4]xi64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi64>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xi64>
+// CHECK:         }
+// unary, e64/m4: result is vector<[4] x i64>.
+func.func @unary_e64m4(%rvl: ui32) -> vector<[4] x i64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i64>
+  return %0 : vector<[4] x i64>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e64m8(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32) -> vector<[8]xi64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi64>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[8]xi64>
+// CHECK:         }
+// unary, e64/m8: result is vector<[8] x i64>.
+func.func @unary_e64m8(%rvl: ui32) -> vector<[8] x i64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i64>
+  return %0 : vector<[8] x i64>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @binary_vv_ro(
+// CHECK-SAME:                            %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32, rd = 30 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// binary.ro, vector-vector operands: no SSA result; rd = 30 carried as attribute.
+func.func @binary_vv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @binary_vv(
+// CHECK-SAME:                         %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK:         }
+// binary, vector-vector operands: returns vector<[4] x f32>; opcode = 3 : i2 widened to i32 in lowering.
+func.func @binary_vv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @binary_xv_ro(
+// CHECK-SAME:                            %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<[4]xf32>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// binary.ro, scalar(i32)-vector operands: first operand passes through as i32.
+func.func @binary_xv_ro(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<[4] x f32>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @binary_xv(
+// CHECK-SAME:                         %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK:         }
+// binary, scalar(i32)-vector operands, with result.
+func.func @binary_xv(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @binary_fv_ro(
+// CHECK-SAME:                            %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32, rd = 30 : i32}> : (f32, vector<[4]xf32>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// binary.ro, f32-vector operands: note the narrower opcode = 1 : i1 (vs i2 in the scalar/vector cases).
+func.func @binary_fv_ro(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @binary_fv(
+// CHECK-SAME:                         %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK:         }
+// binary, f32-vector operands with result; opcode = 1 : i1.
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @binary_iv_ro(
+// CHECK-SAME:                            %[[VAL_0:.*]]: vector<[4]xf32>,
+// CHECK-SAME:                            %[[VAL_1:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<[4]xf32>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// binary.ro, i5 immediate-vector operands: CHECK shows the i5 is zero-extended to i32 before the intrinsic.
+func.func @binary_iv_ro(%op2 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.binary.ro %const, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @binary_iv(
+// CHECK-SAME:                         %[[VAL_0:.*]]: vector<[4]xf32>,
+// CHECK-SAME:                         %[[VAL_1:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.binary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK:         }
+// binary, i5 immediate-vector operands with result; immediate zext'd to i32 per CHECK.
+func.func @binary_iv(%op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.binary %const, %op2, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @ternary_vvv_ro(
+// CHECK-SAME:                              %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// ternary.ro, vector-vector-vector operands: no SSA result; unlike binary.ro, no rd attribute here.
+func.func @ternary_vvv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @ternary_vvv(
+// CHECK-SAME:                           %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK:         }
+// ternary, vector-vector-vector operands: result has the same type as the operands.
+func.func @ternary_vvv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @ternary_xvv_ro(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// ternary.ro, scalar(i32)-vector-vector operands.
+func.func @ternary_xvv_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @ternary_xvv(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK:         }
+// ternary, scalar(i32)-vector-vector operands with result.
+func.func @ternary_xvv(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @ternary_fvv_ro(
+// CHECK-SAME:                              %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// ternary.ro, f32-vector-vector operands; opcode = 1 : i1 (matches the binary f-variant).
+func.func @ternary_fvv_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @ternary_fvv(
+// CHECK-SAME:                           %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK:         }
+// ternary, f32-vector-vector operands with result; opcode = 1 : i1.
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @ternary_ivv_ro(
+// CHECK-SAME:                              %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i32
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// ternary.ro, i5 immediate-vector-vector operands; CHECK shows the i5 zext'd to i32.
+func.func @ternary_ivv_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @ternary_ivv(
+// CHECK-SAME:                           %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i32) -> vector<[4]xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i32
+// CHECK:           %[[VAL_5:.*]] = "vcix.intrin.ternary"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_5]] : vector<[4]xf32>
+// CHECK:         }
+// ternary, i5 immediate-vector-vector operands with result; immediate zext'd to i32 per CHECK.
+func.func @ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @wide_ternary_vvw_ro(
+// CHECK-SAME:                                   %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// wide.ternary.ro, vector-vector-wide(f64) operands: third operand is the widened vector<[4] x f64>.
+func.func @wide_ternary_vvw_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_vvw(
+// CHECK-SAME:                                %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) -> vector<[4]xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf64>
+// CHECK:         }
+// wide.ternary, vector-vector-wide operands: result matches the wide operand type vector<[4] x f64>.
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0: vector<[4] x f64>
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_xvw_ro(
+// CHECK-SAME:                                   %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @wide_ternary_xvw_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_xvw(
+// CHECK-SAME:                                %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) -> vector<[4]xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf64>
+// CHECK:         }
+func.func @wide_ternary_xvw(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_fvw_ro(
+// CHECK-SAME:                                   %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @wide_ternary_fvw_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_fvw(
+// CHECK-SAME:                                %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i32) -> vector<[4]xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xf64>
+// CHECK:         }
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %op3 : vector<[4] x f64>
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_ivw_ro(
+// CHECK-SAME:                                   %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf64>, %[[VAL_2:.*]]: i32) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i32
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @wide_ternary_ivw_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.wide.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_ivw(
+// CHECK-SAME:                                %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf64>, %[[VAL_2:.*]]: i32) -> vector<[4]xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i32
+// CHECK:           %[[VAL_5:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+// CHECK:           llvm.return %[[VAL_1]] : vector<[4]xf64>
+// CHECK:         }
+func.func @wide_ternary_ivw(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %op3 : vector<[4] x f64>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @fixed_binary_vv_ro(
+// CHECK-SAME:                                  %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                  %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32, rd = 30 : i32}> : (vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_binary_vv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_vv(
+// CHECK-SAME:                               %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                               %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_binary_vv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_xv_ro(
+// CHECK-SAME:                                  %[[VAL_0:.*]]: i32,
+// CHECK-SAME:                                  %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_binary_xv_ro(%op1: i32, %op2 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_xv(
+// CHECK-SAME:                               %[[VAL_0:.*]]: i32,
+// CHECK-SAME:                               %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_binary_xv(%op1: i32, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (i32, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_fv_ro(
+// CHECK-SAME:                                  %[[VAL_0:.*]]: f32,
+// CHECK-SAME:                                  %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 1 : i32, rd = 30 : i32}> : (f32, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_binary_fv_ro(%op1: f32, %op2 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2 { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_fv(
+// CHECK-SAME:                               %[[VAL_0:.*]]: f32,
+// CHECK-SAME:                               %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_binary_fv(%op1: f32, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2 { opcode = 1 : i1 } : (f32, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_iv_ro(
+// CHECK-SAME:                                  %[[VAL_0:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_2:.*]] = llvm.zext %[[VAL_1]] : i5 to i32
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_2]], %[[VAL_0]]) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_binary_iv_ro(%op2 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.binary.ro %const, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_iv(
+// CHECK-SAME:                               %[[VAL_0:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_2:.*]] = llvm.zext %[[VAL_1]] : i5 to i32
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_2]], %[[VAL_0]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_binary_iv(%op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.binary %const, %op2 { opcode = 3 : i2 } : (i5, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @fixed_ternary_vvv_ro(
+// CHECK-SAME:                                    %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_ternary_vvv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_vvv(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_ternary_vvv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_xvv_ro(
+// CHECK-SAME:                                    %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_ternary_xvv_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_xvv(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_ternary_xvv(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_fvv_ro(
+// CHECK-SAME:                                    %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_ternary_fvv_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_fvv(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_ternary_fvv(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_ivv_ro(
+// CHECK-SAME:                                    %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                    %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_ternary_ivv_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_ivv(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                 %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_vvw_ro(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_wide_ternary_vvw_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_vvw(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf64>
+// CHECK:         }
+func.func @fixed_wide_ternary_vvw(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_xvw_ro(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_wide_ternary_xvw_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_xvw(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf64>
+// CHECK:         }
+func.func @fixed_wide_ternary_xvw(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_fvw_ro(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_wide_ternary_fvw_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_fvw(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK:           llvm.return %[[VAL_2]] : vector<4xf64>
+// CHECK:         }
+func.func @fixed_wide_ternary_fvw(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %op3 : vector<4 x f64>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_ivw_ro(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                         %[[VAL_1:.*]]: vector<4xf64>) attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_wide_ternary_ivw_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.wide.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_ivw(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                      %[[VAL_1:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i32
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK:           llvm.return %[[VAL_1]] : vector<4xf64>
+// CHECK:         }
+func.func @fixed_wide_ternary_ivw(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+32bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %op3 : vector<4 x f64>
+}
diff --git a/mlir/test/Dialect/VCIX/legalize-for-llvm-rv64.mlir b/mlir/test/Dialect/VCIX/legalize-for-llvm-rv64.mlir
new file mode 100644
index 0000000000000..6fdf390363f2e
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/legalize-for-llvm-rv64.mlir
@@ -0,0 +1,1071 @@
+// RUN: mlir-opt %s -convert-vector-to-llvm="enable-vcix" -convert-func-to-llvm -reconcile-unrealized-casts | FileCheck %s
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_ro_e8mf8(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 0 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8mf8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8mf4(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 1 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8mf4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8mf2(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 2 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8mf2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8m1(
+// CHECK-SAME:                             %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 3 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8m1(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8m2(
+// CHECK-SAME:                             %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 4 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8m2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8m4(
+// CHECK-SAME:                             %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 5 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8m4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e8m8(
+// CHECK-SAME:                             %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 6 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e8m8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_e8mf8(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[1]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[1]xi8>
+// CHECK:         }
+func.func @unary_e8mf8(%rvl: ui64) -> vector<[1] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i8>
+  return %0 : vector<[1] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8mf4(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[2]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[2]xi8>
+// CHECK:         }
+func.func @unary_e8mf4(%rvl: ui64) -> vector<[2] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i8>
+  return %0 : vector<[2] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8mf2(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[4]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xi8>
+// CHECK:         }
+func.func @unary_e8mf2(%rvl: ui64) -> vector<[4] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i8>
+  return %0 : vector<[4] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8m1(
+// CHECK-SAME:                          %[[VAL_0:.*]]: i64) -> vector<[8]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[8]xi8>
+// CHECK:         }
+func.func @unary_e8m1(%rvl: ui64) -> vector<[8] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i8>
+  return %0 : vector<[8] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8m2(
+// CHECK-SAME:                          %[[VAL_0:.*]]: i64) -> vector<[16]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[16]xi8>
+// CHECK:         }
+func.func @unary_e8m2(%rvl: ui64) -> vector<[16] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] x i8>
+  return %0 : vector<[16] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8m4(
+// CHECK-SAME:                          %[[VAL_0:.*]]: i64) -> vector<[32]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[32]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[32]xi8>
+// CHECK:         }
+func.func @unary_e8m4(%rvl: ui64) -> vector<[32] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[32] x i8>
+  return %0 : vector<[32] x i8>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e8m8(
+// CHECK-SAME:                          %[[VAL_0:.*]]: i64) -> vector<[64]xi8> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[64]xi8>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[64]xi8>
+// CHECK:         }
+func.func @unary_e8m8(%rvl: ui64) -> vector<[64] x i8> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[64] x i8>
+  return %0 : vector<[64] x i8>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_ro_e16mf4(
+// CHECK-SAME:                               %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 7 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16mf4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16mf2(
+// CHECK-SAME:                               %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 8 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16mf2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16m1(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 9 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16m1(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16m2(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 10 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16m2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16m4(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 11 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16m4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e16m8(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 12 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e16m8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_e16mf4(
+// CHECK-SAME:                            %[[VAL_0:.*]]: i64) -> vector<[1]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[1]xi16>
+// CHECK:         }
+func.func @unary_e16mf4(%rvl: ui64) -> vector<[1] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i16>
+  return %0 : vector<[1] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16mf2(
+// CHECK-SAME:                            %[[VAL_0:.*]]: i64) -> vector<[2]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[2]xi16>
+// CHECK:         }
+func.func @unary_e16mf2(%rvl: ui64) -> vector<[2] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i16>
+  return %0 : vector<[2] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16m1(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[4]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xi16>
+// CHECK:         }
+func.func @unary_e16m1(%rvl: ui64) -> vector<[4] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i16>
+  return %0 : vector<[4] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16m2(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[8]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[8]xi16>
+// CHECK:         }
+func.func @unary_e16m2(%rvl: ui64) -> vector<[8] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i16>
+  return %0 : vector<[8] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16m4(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[16]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[16]xi16>
+// CHECK:         }
+func.func @unary_e16m4(%rvl: ui64) -> vector<[16] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] x i16>
+  return %0 : vector<[16] x i16>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e16m8(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[32]xi16> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[32]xi16>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[32]xi16>
+// CHECK:         }
+func.func @unary_e16m8(%rvl: ui64) -> vector<[32] x i16> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[32] x i16>
+  return %0 : vector<[32] x i16>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_ro_e32mf2(
+// CHECK-SAME:                               %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 13 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e32mf2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e32m1(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 14 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e32m1(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e32m2(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 15 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e32m2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e32m4(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 16 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e32m4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e32m8(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 17 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e32m8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_e32mf2(
+// CHECK-SAME:                            %[[VAL_0:.*]]: i64) -> vector<[1]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[1]xi32>
+// CHECK:         }
+func.func @unary_e32mf2(%rvl: ui64) -> vector<[1] x i32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i32>
+  return %0 : vector<[1] x i32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e32m1(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[2]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[2]xi32>
+// CHECK:         }
+func.func @unary_e32m1(%rvl: ui64) -> vector<[2] x i32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i32>
+  return %0 : vector<[2] x i32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e32m2(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[4]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xi32>
+// CHECK:         }
+func.func @unary_e32m2(%rvl: ui64) -> vector<[4] x i32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i32>
+  return %0 : vector<[4] x i32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e32m4(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[8]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[8]xi32>
+// CHECK:         }
+func.func @unary_e32m4(%rvl: ui64) -> vector<[8] x i32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i32>
+  return %0 : vector<[8] x i32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e32m8(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[16]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[16]xi32>
+// CHECK:         }
+func.func @unary_e32m8(%rvl: ui64) -> vector<[16] x i32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] x i32>
+  return %0 : vector<[16] x i32>
+}
+
+// -----
+// CHECK-LABEL:   llvm.func @unary_ro_e64m1(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 18 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e64m1(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e64m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e64m2(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 19 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+// Read-only unary VCIX op (vcix.unary.ro) for e64 with LMUL = 2, 4 and 8.
+// The sew/lmul pair is folded into the single `sew_lmul` integer attribute on
+// the lowered intrinsic (19, 20, 21 below); i2/i5 immediates widen to i64.
+func.func @unary_ro_e64m2(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e64m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e64m4(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 20 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e64m4(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e64m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @unary_ro_e64m8(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           "vcix.intrin.unary.ro"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 21 : i32}> : (i64, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @unary_ro_e64m8(%rvl: ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e64m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// -----
+// Value-returning unary VCIX op (vcix.unary). Unlike the .ro form there is no
+// rd/sew_lmul attribute: the result register class is implied by the scalable
+// result vector type ([1]/[2]/[4]/[8] x i32 for LMUL 1/2/4/8 at e64).
+// CHECK-LABEL:   llvm.func @unary_e64m1(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[1]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[1]xi32>
+// CHECK:         }
+func.func @unary_e64m1(%rvl: ui64) -> vector<[1]  xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1]  xi32>
+  return %0 : vector<[1]  xi32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e64m2(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[2]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[2]xi32>
+// CHECK:         }
+func.func @unary_e64m2(%rvl: ui64) -> vector<[2]  xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2]  xi32>
+  return %0 : vector<[2]  xi32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e64m4(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[4]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xi32>
+// CHECK:         }
+func.func @unary_e64m4(%rvl: ui64) -> vector<[4]  xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4]  xi32>
+  return %0 : vector<[4]  xi32>
+}
+
+// CHECK-LABEL:   llvm.func @unary_e64m8(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64) -> vector<[8]xi32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.unary"(%[[VAL_1]], %[[VAL_0]]) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[8]xi32>
+// CHECK:         }
+func.func @unary_e64m8(%rvl: ui64) -> vector<[8]  xi32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8]  xi32>
+  return %0 : vector<[8]  xi32>
+}
+
+// -----
+// Binary VCIX ops over scalable vectors, one test per first-operand kind:
+// vv (vector), xv (GPR/i64), fv (FPR/f32, 1-bit opcode), iv (5-bit immediate,
+// which is zero-extended to i64 before the intrinsic call).
+// CHECK-LABEL:   llvm.func @binary_vv_ro(
+// CHECK-SAME:                            %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64, rd = 30 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @binary_vv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @binary_vv(
+// CHECK-SAME:                         %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK:         }
+func.func @binary_vv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @binary_xv_ro(
+// CHECK-SAME:                            %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<[4]xf32>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @binary_xv_ro(%op1: i64, %op2 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i64, vector<[4] x f32>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @binary_xv(
+// CHECK-SAME:                         %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK:         }
+func.func @binary_xv(%op1: i64, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @binary_fv_ro(
+// CHECK-SAME:                            %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64, rd = 30 : i64}> : (f32, vector<[4]xf32>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @binary_fv_ro(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @binary_fv(
+// CHECK-SAME:                         %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<[4]xf32>
+// CHECK:         }
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @binary_iv_ro(
+// CHECK-SAME:                            %[[VAL_0:.*]]: vector<[4]xf32>,
+// CHECK-SAME:                            %[[VAL_1:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<[4]xf32>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @binary_iv_ro(%op2 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.binary.ro %const, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @binary_iv(
+// CHECK-SAME:                         %[[VAL_0:.*]]: vector<[4]xf32>,
+// CHECK-SAME:                         %[[VAL_1:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.binary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK:         }
+func.func @binary_iv(%op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.binary %const, %op2, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+// Ternary VCIX ops over scalable vectors. The third source doubles as the
+// destination, so the lowered intrinsic carries no rd attribute; first-operand
+// kinds mirror the binary tests (vvv / xvv / fvv / ivv).
+// CHECK-LABEL:   llvm.func @ternary_vvv_ro(
+// CHECK-SAME:                              %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @ternary_vvv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @ternary_vvv(
+// CHECK-SAME:                           %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK:         }
+func.func @ternary_vvv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @ternary_xvv_ro(
+// CHECK-SAME:                              %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @ternary_xvv_ro(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @ternary_xvv(
+// CHECK-SAME:                           %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK:         }
+func.func @ternary_xvv(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @ternary_fvv_ro(
+// CHECK-SAME:                              %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @ternary_fvv_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @ternary_fvv(
+// CHECK-SAME:                           %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf32>, %[[VAL_3:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf32>
+// CHECK:         }
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// CHECK-LABEL:   llvm.func @ternary_ivv_ro(
+// CHECK-SAME:                              %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i64
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @ternary_ivv_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @ternary_ivv(
+// CHECK-SAME:                           %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: i64) -> vector<[4]xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i64
+// CHECK:           %[[VAL_5:.*]] = "vcix.intrin.ternary"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+// CHECK:           llvm.return %[[VAL_5]] : vector<[4]xf32>
+// CHECK:         }
+func.func @ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+// Widening ternary VCIX ops: the third operand/result is the widened type
+// (f64 for f32 sources). Operand-kind coverage mirrors the ternary tests.
+// CHECK-LABEL:   llvm.func @wide_ternary_vvw_ro(
+// CHECK-SAME:                                   %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @wide_ternary_vvw_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_vvw(
+// CHECK-SAME:                                %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) -> vector<[4]xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf64>
+// CHECK:         }
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+  return %0: vector<[4] x f64>
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_xvw_ro(
+// CHECK-SAME:                                   %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @wide_ternary_xvw_ro(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f64>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_xvw(
+// CHECK-SAME:                                %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) -> vector<[4]xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+// CHECK:           llvm.return %[[VAL_4]] : vector<[4]xf64>
+// CHECK:         }
+func.func @wide_ternary_xvw(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_fvw_ro(
+// CHECK-SAME:                                   %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @wide_ternary_fvw_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui64)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_fvw(
+// CHECK-SAME:                                %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<[4]xf32>, %[[VAL_2:.*]]: vector<[4]xf64>, %[[VAL_3:.*]]: i64) -> vector<[4]xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+// CHECK:           llvm.return %[[VAL_2]] : vector<[4]xf64>
+// CHECK:         }
+// NOTE: %0 is intentionally left unused; the function returns %op3 to keep
+// the side-effecting intrinsic live while checking the returned operand.
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+  return %op3 : vector<[4] x f64>
+}
+
+// CHECK-LABEL:   llvm.func @wide_ternary_ivw_ro(
+// CHECK-SAME:                                   %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf64>, %[[VAL_2:.*]]: i64) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i64
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @wide_ternary_ivw_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.wide.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui64)
+  return
+}
+
+// Widening ternary with a 5-bit immediate first operand. Renamed from
+// @wide_ternary_ivv to @wide_ternary_ivw for consistency with the rest of
+// this section (third operand is the widened "w" type, cf.
+// @wide_ternary_ivw_ro above); label and function renamed together so the
+// FileCheck match is unaffected.
+// CHECK-LABEL:   llvm.func @wide_ternary_ivw(
+// CHECK-SAME:                                %[[VAL_0:.*]]: vector<[4]xf32>, %[[VAL_1:.*]]: vector<[4]xf64>, %[[VAL_2:.*]]: i64) -> vector<[4]xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_4:.*]] = llvm.zext %[[VAL_3]] : i5 to i64
+// CHECK:           %[[VAL_5:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_4]], %[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+// CHECK:           llvm.return %[[VAL_1]] : vector<[4]xf64>
+// CHECK:         }
+func.func @wide_ternary_ivw(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+  return %op3 : vector<[4] x f64>
+}
+
+// -----
+// Fixed-length (non-scalable) vector variants of the binary ops. No %rvl
+// operand is passed: the vector length is implied by the fixed vector type.
+// CHECK-LABEL:   llvm.func @fixed_binary_vv_ro(
+// CHECK-SAME:                                  %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                  %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64, rd = 30 : i64}> : (vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_binary_vv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_vv(
+// CHECK-SAME:                               %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                               %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_binary_vv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_xv_ro(
+// CHECK-SAME:                                  %[[VAL_0:.*]]: i64,
+// CHECK-SAME:                                  %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_binary_xv_ro(%op1: i64, %op2 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i64, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_xv(
+// CHECK-SAME:                               %[[VAL_0:.*]]: i64,
+// CHECK-SAME:                               %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_binary_xv(%op1: i64, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (i64, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_fv_ro(
+// CHECK-SAME:                                  %[[VAL_0:.*]]: f32,
+// CHECK-SAME:                                  %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 1 : i64, rd = 30 : i64}> : (f32, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_binary_fv_ro(%op1: f32, %op2 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.binary.ro %op1, %op2 { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_fv(
+// CHECK-SAME:                               %[[VAL_0:.*]]: f32,
+// CHECK-SAME:                               %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = "vcix.intrin.binary"(%[[VAL_0]], %[[VAL_1]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_2]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_binary_fv(%op1: f32, %op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.binary %op1, %op2 { opcode = 1 : i1 } : (f32, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_iv_ro(
+// CHECK-SAME:                                  %[[VAL_0:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_2:.*]] = llvm.zext %[[VAL_1]] : i5 to i64
+// CHECK:           "vcix.intrin.binary.ro"(%[[VAL_2]], %[[VAL_0]]) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_binary_iv_ro(%op2 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.binary.ro %const, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_binary_iv(
+// CHECK-SAME:                               %[[VAL_0:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_2:.*]] = llvm.zext %[[VAL_1]] : i5 to i64
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.binary"(%[[VAL_2]], %[[VAL_0]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_binary_iv(%op2 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.binary %const, %op2 { opcode = 3 : i2 } : (i5, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+// Fixed-length vector variants of the ternary ops (no %rvl operand).
+// CHECK-LABEL:   llvm.func @fixed_ternary_vvv_ro(
+// CHECK-SAME:                                    %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_ternary_vvv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_vvv(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_ternary_vvv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_xvv_ro(
+// CHECK-SAME:                                    %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_ternary_xvv_ro(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_xvv(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_ternary_xvv(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_fvv_ro(
+// CHECK-SAME:                                    %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_ternary_fvv_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_fvv(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_ternary_fvv(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_ivv_ro(
+// CHECK-SAME:                                    %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                    %[[VAL_1:.*]]: vector<4xf32>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK:           "vcix.intrin.ternary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_ternary_ivv_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_ternary_ivv(
+// CHECK-SAME:                                 %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                 %[[VAL_1:.*]]: vector<4xf32>) -> vector<4xf32> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.ternary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+// CHECK:           llvm.return %[[VAL_4]] : vector<4xf32>
+// CHECK:         }
+func.func @fixed_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+// Fixed-vector widening ternary lowering tests: the third operand and the
+// result are the widened (f64) element type.
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_vvw_ro(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_wide_ternary_vvw_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_vvw(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: vector<4xf32>, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf64>
+// CHECK:         }
+func.func @fixed_wide_ternary_vvw(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_xvw_ro(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_wide_ternary_xvw_ro(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_xvw(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: i64, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK:           llvm.return %[[VAL_3]] : vector<4xf64>
+// CHECK:         }
+func.func @fixed_wide_ternary_xvw(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_fvw_ro(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_wide_ternary_fvw_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_fvw(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: vector<4xf32>, %[[VAL_2:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_3:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK:           llvm.return %[[VAL_2]] : vector<4xf64>
+// CHECK:         }
+// NOTE(review): unlike the vvw/xvw tests above, this one discards %0 and
+// returns %op3 (hence the CHECK on %[[VAL_2]], the block argument). It may be
+// deliberate — e.g. checking that an unused intrinsic result is not dropped —
+// but if not, returning %0 (and checking %[[VAL_3]]) would match the siblings.
+func.func @fixed_wide_ternary_fvw(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %op3 : vector<4 x f64>
+}
+
+// Fixed-vector widening ternary with a 5-bit immediate first operand.
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_ivw_ro(
+// CHECK-SAME:                                         %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                         %[[VAL_1:.*]]: vector<4xf64>) attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK:           "vcix.intrin.wide.ternary.ro"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> ()
+// CHECK:           llvm.return
+// CHECK:         }
+func.func @fixed_wide_ternary_ivw_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  vcix.wide.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+// Renamed from @fixed_wide_ternary_ivv: this is the immediate + widened-result
+// (ivw) variant, matching the _ro test above; the CHECK-LABEL is renamed in
+// lock-step so the FileCheck match is unaffected.
+// CHECK-LABEL:   llvm.func @fixed_wide_ternary_ivw(
+// CHECK-SAME:                                      %[[VAL_0:.*]]: vector<4xf32>,
+// CHECK-SAME:                                      %[[VAL_1:.*]]: vector<4xf64>) -> vector<4xf64> attributes {vcix.target_features = "+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul"} {
+// CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(1 : i5) : i5
+// CHECK:           %[[VAL_3:.*]] = llvm.zext %[[VAL_2]] : i5 to i64
+// CHECK:           %[[VAL_4:.*]] = "vcix.intrin.wide.ternary"(%[[VAL_3]], %[[VAL_0]], %[[VAL_1]]) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+// CHECK:           llvm.return %[[VAL_1]] : vector<4xf64>
+// CHECK:         }
+func.func @fixed_wide_ternary_ivw(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> attributes { vcix.target_features="+64bit,+v,+zfh,+xsfvcp,+zvl2048b,+zvfh,+v,+zmmul" } {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %op3 : vector<4 x f64>
+}
diff --git a/mlir/test/Dialect/VCIX/ops-rv64.mlir b/mlir/test/Dialect/VCIX/ops-rv64.mlir
new file mode 100644
index 0000000000000..2f6623c7c8aaf
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/ops-rv64.mlir
@@ -0,0 +1,531 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+// -----
+// Parse/verify tests for vcix.unary.ro over every e8 LMUL (mf8..m8). The
+// SEW/LMUL token after the mnemonic selects the register shape; rvl is the
+// runtime vector length.
+func.func @unary_ro_e8mf8(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e8mf4(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e8mf2(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e8m1(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e8m2(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e8m4(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e8m8(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e8m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// -----
+// vcix.unary result forms for e8: scalable i8 vectors from vscale x 1 (mf8)
+// up to vscale x 64 (m8); the result type encodes SEW/LMUL instead of a
+// mnemonic token.
+func.func @unary_e8mf8(%rvl: ui64) -> vector<[1] x i8>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i8>
+  return %0 : vector<[1] x i8>
+}
+
+func.func @unary_e8mf4(%rvl: ui64) -> vector<[2] x i8>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i8>
+  return %0 : vector<[2] x i8>
+}
+
+func.func @unary_e8mf2(%rvl: ui64) -> vector<[4] x i8>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i8>
+  return %0 : vector<[4] x i8>
+}
+
+func.func @unary_e8m1(%rvl: ui64) -> vector<[8] x i8>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i8>
+  return %0 : vector<[8] x i8>
+}
+
+func.func @unary_e8m2(%rvl: ui64) -> vector<[16] x i8>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] x i8>
+  return %0 : vector<[16] x i8>
+}
+
+func.func @unary_e8m4(%rvl: ui64) -> vector<[32] x i8>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[32] x i8>
+  return %0 : vector<[32] x i8>
+}
+
+func.func @unary_e8m8(%rvl: ui64) -> vector<[64] x i8>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[64] x i8>
+  return %0 : vector<[64] x i8>
+}
+
+// -----
+// vcix.unary.ro over every e16 LMUL (mf4..m8).
+func.func @unary_ro_e16mf4(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e16mf2(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e16m1(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e16m2(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e16m4(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e16m8(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e16m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// -----
+// vcix.unary result forms for e16: scalable i16 vectors, vscale x 1 (mf4)
+// through vscale x 32 (m8).
+func.func @unary_e16mf4(%rvl: ui64) -> vector<[1] x i16>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i16>
+  return %0 : vector<[1] x i16>
+}
+
+func.func @unary_e16mf2(%rvl: ui64) -> vector<[2] x i16>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i16>
+  return %0 : vector<[2] x i16>
+}
+
+func.func @unary_e16m1(%rvl: ui64) -> vector<[4] x i16>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i16>
+  return %0 : vector<[4] x i16>
+}
+
+func.func @unary_e16m2(%rvl: ui64) -> vector<[8] x i16>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i16>
+  return %0 : vector<[8] x i16>
+}
+
+func.func @unary_e16m4(%rvl: ui64) -> vector<[16] x i16>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] x i16>
+  return %0 : vector<[16] x i16>
+}
+
+func.func @unary_e16m8(%rvl: ui64) -> vector<[32] x i16>{
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[32] x i16>
+  return %0 : vector<[32] x i16>
+}
+
+// -----
+// vcix.unary.ro over every e32 LMUL (mf2..m8).
+func.func @unary_ro_e32mf2(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e32m1(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e32m2(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e32m4(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e32m8(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e32m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// -----
+// vcix.unary result forms for e32: scalable i32 vectors, vscale x 1 (mf2)
+// through vscale x 16 (m8). Vector-type spelling normalized from
+// "vector<[N]  xi32>" to the "vector<[N] x i32>" convention used by the
+// rest of these tests (both parse identically).
+func.func @unary_e32mf2(%rvl: ui64) -> vector<[1] x i32> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i32>
+  return %0 : vector<[1] x i32>
+}
+
+func.func @unary_e32m1(%rvl: ui64) -> vector<[2] x i32> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i32>
+  return %0 : vector<[2] x i32>
+}
+
+func.func @unary_e32m2(%rvl: ui64) -> vector<[4] x i32> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i32>
+  return %0 : vector<[4] x i32>
+}
+
+func.func @unary_e32m4(%rvl: ui64) -> vector<[8] x i32> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i32>
+  return %0 : vector<[8] x i32>
+}
+
+func.func @unary_e32m8(%rvl: ui64) -> vector<[16] x i32> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[16] x i32>
+  return %0 : vector<[16] x i32>
+}
+
+// -----
+// vcix.unary.ro over every e64 LMUL (m1..m8; no fractional LMUL for e64).
+func.func @unary_ro_e64m1(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e64m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e64m2(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e64m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e64m4(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e64m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+func.func @unary_ro_e64m8(%rvl: ui64) {
+  %const = arith.constant 0 : i64
+  vcix.unary.ro e64m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i64, ui64)
+  return
+}
+
+// -----
+// vcix.unary result forms for e64. The scalable sizes [1],[2],[4],[8]
+// already follow e64 LMUL math (e64m1 = vscale x 1 x i64, ... m8 = vscale x
+// 8 x i64), but the element type was left as i32 — apparently copied from
+// the e32 section. Corrected to i64 to match the e64 naming, and the
+// vector-type spelling normalized to the file's "vector<[N] x T>" style.
+func.func @unary_e64m1(%rvl: ui64) -> vector<[1] x i64> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[1] x i64>
+  return %0 : vector<[1] x i64>
+}
+
+func.func @unary_e64m2(%rvl: ui64) -> vector<[2] x i64> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[2] x i64>
+  return %0 : vector<[2] x i64>
+}
+
+func.func @unary_e64m4(%rvl: ui64) -> vector<[4] x i64> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[4] x i64>
+  return %0 : vector<[4] x i64>
+}
+
+func.func @unary_e64m8(%rvl: ui64) -> vector<[8] x i64> {
+  %const = arith.constant 0 : i64
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i64, ui64) -> vector<[8] x i64>
+  return %0 : vector<[8] x i64>
+}
+
+// -----
+// Scalable-vector binary forms: one .ro / value-returning pair per
+// first-operand kind (vv, xv, fv, iv); rvl supplies the runtime vector
+// length for scalable shapes.
+func.func @binary_vv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui64) {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+func.func @binary_vv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @binary_xv_ro(%op1: i64, %op2 : vector<[4] x f32>, %rvl : ui64) {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i64, vector<[4] x f32>, ui64)
+  return
+}
+
+func.func @binary_xv(%op1: i64, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @binary_fv_ro(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui64) {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui64)
+  return
+}
+
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @binary_iv_ro(%op2 : vector<[4] x f32>, %rvl : ui64) {
+  %const = arith.constant 1 : i5
+  vcix.binary.ro %const, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui64)
+  return
+}
+
+func.func @binary_iv(%op2 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.binary %const, %op2, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+// Scalable-vector ternary forms (vvv, xvv, fvv, ivv), each with a .ro and a
+// value-returning variant.
+func.func @ternary_vvv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+func.func @ternary_vvv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_xvv_ro(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+func.func @ternary_xvv(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_fvv_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_ivv_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) {
+  %const = arith.constant 1 : i5
+  vcix.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui64)
+  return
+}
+
+func.func @ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui64) -> vector<[4] x f32> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui64) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+// Scalable-vector widening ternary forms: third operand and result use the
+// widened (f64) element type.
+func.func @wide_ternary_vvw_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui64)
+  return
+}
+
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+  return %0: vector<[4] x f64>
+}
+
+func.func @wide_ternary_xvw_ro(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f64>, ui64)
+  return
+}
+
+func.func @wide_ternary_xvw(%op1: i64, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i64, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+// NOTE(review): %0 is intentionally left unused here and %op3 is returned —
+// if that is not deliberate, return %0 like the vvw/xvw tests above.
+func.func @wide_ternary_fvw_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui64)
+  return
+}
+
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+  return %op3 : vector<[4] x f64>
+}
+
+func.func @wide_ternary_ivw_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) {
+  %const = arith.constant 1 : i5
+  vcix.wide.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui64)
+  return
+}
+
+// Renamed from @wide_ternary_ivv: this is the immediate + widened-result
+// (ivw) variant, matching @wide_ternary_ivw_ro above. This file has no
+// FileCheck lines, so the rename is self-contained.
+func.func @wide_ternary_ivw(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui64) -> vector<[4] x f64> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui64) -> vector<[4] x f64>
+  return %op3 : vector<[4] x f64>
+}
+
+// -----
+// Fixed-vector binary forms in the ops file: no rvl operand and no
+// SEW/LMUL token, since the shape is fully determined by the fixed vector
+// type.
+func.func @fixed_binary_vv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) {
+  vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_binary_vv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_xv_ro(%op1: i64, %op2 : vector<4 x f32>) {
+  vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i64, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_binary_xv(%op1: i64, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (i64, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_fv_ro(%op1: f32, %op2 : vector<4 x f32>) {
+  vcix.binary.ro %op1, %op2 { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_binary_fv(%op1: f32, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.binary %op1, %op2 { opcode = 1 : i1 } : (f32, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_iv_ro(%op2 : vector<4 x f32>) {
+  %const = arith.constant 1 : i5
+  vcix.binary.ro %const, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_binary_iv(%op2 : vector<4 x f32>) -> vector<4 x f32> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.binary %const, %op2 { opcode = 3 : i2 } : (i5, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @fixed_ternary_vvv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_ternary_vvv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_xvv_ro(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_ternary_xvv(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_fvv_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_ternary_fvv(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_ivv_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+  %const = arith.constant 1 : i5
+  vcix.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @fixed_wide_ternary_vvw_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+func.func @fixed_wide_ternary_vvw(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_xvw_ro(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+func.func @fixed_wide_ternary_xvw(%op1: i64, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i64, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_fvw_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+func.func @fixed_wide_ternary_fvw(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_ivw_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+  %const = arith.constant 1 : i5
+  vcix.wide.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+func.func @fixed_wide_ternary_ivw(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
diff --git a/mlir/test/Dialect/VCIX/ops.mlir b/mlir/test/Dialect/VCIX/ops.mlir
new file mode 100644
index 0000000000000..c5392d67d50d5
--- /dev/null
+++ b/mlir/test/Dialect/VCIX/ops.mlir
@@ -0,0 +1,531 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+// -----
+func.func @unary_ro_e8mf8(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8mf8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e8mf4(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e8mf2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e8m1(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e8m2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e8m4(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e8m8(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e8m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// -----
+func.func @unary_e8mf8(%rvl: ui32) -> vector<[1] x i8>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i8>
+  return %0 : vector<[1] x i8>
+}
+
+func.func @unary_e8mf4(%rvl: ui32) -> vector<[2] x i8>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i8>
+  return %0 : vector<[2] x i8>
+}
+
+func.func @unary_e8mf2(%rvl: ui32) -> vector<[4] x i8>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i8>
+  return %0 : vector<[4] x i8>
+}
+
+func.func @unary_e8m1(%rvl: ui32) -> vector<[8] x i8>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i8>
+  return %0 : vector<[8] x i8>
+}
+
+func.func @unary_e8m2(%rvl: ui32) -> vector<[16] x i8>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i8>
+  return %0 : vector<[16] x i8>
+}
+
+func.func @unary_e8m4(%rvl: ui32) -> vector<[32] x i8>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i8>
+  return %0 : vector<[32] x i8>
+}
+
+func.func @unary_e8m8(%rvl: ui32) -> vector<[64] x i8>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[64] x i8>
+  return %0 : vector<[64] x i8>
+}
+
+// -----
+func.func @unary_ro_e16mf4(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16mf4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e16mf2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e16m1(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e16m2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e16m4(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e16m8(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e16m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// -----
+func.func @unary_e16mf4(%rvl: ui32) -> vector<[1] x i16>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i16>
+  return %0 : vector<[1] x i16>
+}
+
+func.func @unary_e16mf2(%rvl: ui32) -> vector<[2] x i16>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i16>
+  return %0 : vector<[2] x i16>
+}
+
+func.func @unary_e16m1(%rvl: ui32) -> vector<[4] x i16>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i16>
+  return %0 : vector<[4] x i16>
+}
+
+func.func @unary_e16m2(%rvl: ui32) -> vector<[8] x i16>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i16>
+  return %0 : vector<[8] x i16>
+}
+
+func.func @unary_e16m4(%rvl: ui32) -> vector<[16] x i16>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i16>
+  return %0 : vector<[16] x i16>
+}
+
+func.func @unary_e16m8(%rvl: ui32) -> vector<[32] x i16>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[32] x i16>
+  return %0 : vector<[32] x i16>
+}
+
+// -----
+func.func @unary_ro_e32mf2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32mf2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e32m1(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e32m2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e32m4(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e32m8(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e32m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// -----
+func.func @unary_e32mf2(%rvl: ui32) -> vector<[1] x i32>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i32>
+  return %0 : vector<[1] x i32>
+}
+
+func.func @unary_e32m1(%rvl: ui32) -> vector<[2] x i32>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i32>
+  return %0 : vector<[2] x i32>
+}
+
+func.func @unary_e32m2(%rvl: ui32) -> vector<[4] x i32>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i32>
+  return %0 : vector<[4] x i32>
+}
+
+func.func @unary_e32m4(%rvl: ui32) -> vector<[8] x i32>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i32>
+  return %0 : vector<[8] x i32>
+}
+
+func.func @unary_e32m8(%rvl: ui32) -> vector<[16] x i32>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[16] x i32>
+  return %0 : vector<[16] x i32>
+}
+
+// -----
+func.func @unary_ro_e64m1(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e64m1 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e64m2(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e64m2 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e64m4(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e64m4 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+func.func @unary_ro_e64m8(%rvl: ui32) {
+  %const = arith.constant 0 : i32
+  vcix.unary.ro e64m8 %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5, rd = 30 : i5 } : (i32, ui32)
+  return
+}
+
+// -----
+func.func @unary_e64m1(%rvl: ui32) -> vector<[1] x i64>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[1] x i64>
+  return %0 : vector<[1] x i64>
+}
+
+func.func @unary_e64m2(%rvl: ui32) -> vector<[2] x i64>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[2] x i64>
+  return %0 : vector<[2] x i64>
+}
+
+func.func @unary_e64m4(%rvl: ui32) -> vector<[4] x i64>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[4] x i64>
+  return %0 : vector<[4] x i64>
+}
+
+func.func @unary_e64m8(%rvl: ui32) -> vector<[8] x i64>{
+  %const = arith.constant 0 : i32
+  %0 = vcix.unary %const, %rvl { opcode = 3 : i2, rs2 = 31 : i5} : (i32, ui32) -> vector<[8] x i64>
+  return %0 : vector<[8] x i64>
+}
+
+// -----
+func.func @binary_vv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+func.func @binary_vv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @binary_xv_ro(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<[4] x f32>, ui32)
+  return
+}
+
+func.func @binary_xv(%op1: i32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @binary_fv_ro(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) {
+  vcix.binary.ro %op1, %op2, %rvl { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<[4] x f32>, ui32)
+  return
+}
+
+func.func @binary_fv(%op1: f32, %op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  %0 = vcix.binary %op1, %op2, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @binary_iv_ro(%op2 : vector<[4] x f32>, %rvl : ui32) {
+  %const = arith.constant 1 : i5
+  vcix.binary.ro %const, %op2, %rvl { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<[4] x f32>, ui32)
+  return
+}
+
+func.func @binary_iv(%op2 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.binary %const, %op2, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @ternary_vvv_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+func.func @ternary_vvv(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_xvv_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+func.func @ternary_xvv(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_fvv_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+  vcix.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+func.func @ternary_fvv(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+func.func @ternary_ivv_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) {
+  %const = arith.constant 1 : i5
+  vcix.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32)
+  return
+}
+
+func.func @ternary_ivv(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f32>, %rvl : ui32) -> vector<[4] x f32> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f32>, ui32) -> vector<[4] x f32>
+  return %0 : vector<[4] x f32>
+}
+
+// -----
+func.func @wide_ternary_vvw_ro(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32)
+  return
+}
+
+func.func @wide_ternary_vvw(%op1: vector<[4] x f32>, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (vector<[4] x f32>, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0: vector<[4] x f64>
+}
+
+func.func @wide_ternary_xvw_ro(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+  return
+}
+
+func.func @wide_ternary_xvw(%op1: i32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 3 : i2 } : (i32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+func.func @wide_ternary_fvw_ro(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+  vcix.wide.ternary.ro %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32)
+  return
+}
+
+func.func @wide_ternary_fvw(%op1: f32, %op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3, %rvl { opcode = 1 : i1 } : (f32, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+func.func @wide_ternary_ivw_ro(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) {
+  %const = arith.constant 1 : i5
+  vcix.wide.ternary.ro %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32)
+  return
+}
+
+func.func @wide_ternary_ivw(%op2 : vector<[4] x f32>, %op3 : vector<[4] x f64>, %rvl : ui32) -> vector<[4] x f64> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3, %rvl { opcode = 3 : i2 } : (i5, vector<[4] x f32>, vector<[4] x f64>, ui32) -> vector<[4] x f64>
+  return %0 : vector<[4] x f64>
+}
+
+// -----
+func.func @fixed_binary_vv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) {
+  vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_binary_vv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_xv_ro(%op1: i32, %op2 : vector<4 x f32>) {
+  vcix.binary.ro %op1, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i32, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_binary_xv(%op1: i32, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.binary %op1, %op2 { opcode = 3 : i2 } : (i32, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_fv_ro(%op1: f32, %op2 : vector<4 x f32>) {
+  vcix.binary.ro %op1, %op2 { opcode = 1 : i1, rd = 30 : i5 } : (f32, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_binary_fv(%op1: f32, %op2 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.binary %op1, %op2 { opcode = 1 : i1 } : (f32, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_binary_iv_ro(%op2 : vector<4 x f32>) {
+  %const = arith.constant 1 : i5
+  vcix.binary.ro %const, %op2 { opcode = 3 : i2, rd = 30 : i5 } : (i5, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_binary_iv(%op2 : vector<4 x f32>) -> vector<4 x f32> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.binary %const, %op2 { opcode = 3 : i2 } : (i5, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @fixed_ternary_vvv_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_ternary_vvv(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_xvv_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_ternary_xvv(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_fvv_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+  vcix.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_ternary_fvv(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+  %0 = vcix.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+func.func @fixed_ternary_ivv_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) {
+  %const = arith.constant 1 : i5
+  vcix.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>)
+  return
+}
+
+func.func @fixed_ternary_ivv(%op2 : vector<4 x f32>, %op3 : vector<4 x f32>) -> vector<4 x f32> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f32>) -> vector<4 x f32>
+  return %0 : vector<4 x f32>
+}
+
+// -----
+func.func @fixed_wide_ternary_vvw_ro(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+func.func @fixed_wide_ternary_vvw(%op1: vector<4 x f32>, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (vector<4 x f32>, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_xvw_ro(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+func.func @fixed_wide_ternary_xvw(%op1: i32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 3 : i2 } : (i32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_fvw_ro(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+  vcix.wide.ternary.ro %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+func.func @fixed_wide_ternary_fvw(%op1: f32, %op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %0 = vcix.wide.ternary %op1, %op2, %op3 { opcode = 1 : i1 } : (f32, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
+
+func.func @fixed_wide_ternary_ivw_ro(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) {
+  %const = arith.constant 1 : i5
+  vcix.wide.ternary.ro %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>)
+  return
+}
+
+func.func @fixed_wide_ternary_ivw(%op2 : vector<4 x f32>, %op3 : vector<4 x f64>) -> vector<4 x f64> {
+  %const = arith.constant 1 : i5
+  %0 = vcix.wide.ternary %const, %op2, %op3 { opcode = 3 : i2 } : (i5, vector<4 x f32>, vector<4 x f64>) -> vector<4 x f64>
+  return %0 : vector<4 x f64>
+}
diff --git a/mlir/test/Target/LLVMIR/vcix-rv32.mlir b/mlir/test/Target/LLVMIR/vcix-rv32.mlir
new file mode 100644
index 0000000000000..2e39759f168a1
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/vcix-rv32.mlir
@@ -0,0 +1,897 @@
+// RUN: mlir-translate --mlir-to-llvmir %s | FileCheck %s
+
+// CHECK-LABEL: define void @unary_ro_e8mf8(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8mf8.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8mf8(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 0 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8mf4(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8mf4.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8mf4(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 1 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8mf2(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8mf2.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8mf2(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 2 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8m1(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8m1.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8m1(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 3 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8m2(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8m2.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8m2(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 4 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8m4(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8m4.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8m4(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 5 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8m8(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8m8.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8m8(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 6 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 1 x i8> @unary_e8mf8(i32 %0) {
+// CHECK:   %2 = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 1 x i8> %2
+// CHECK: }
+llvm.func @unary_e8mf8(%arg0: i32) -> vector<[1]xi8> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi8>
+  llvm.return %1 : vector<[1]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 2 x i8> @unary_e8mf4(i32 %0) {
+// CHECK:   %2 = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 2 x i8> %2
+// CHECK: }
+llvm.func @unary_e8mf4(%arg0: i32) -> vector<[2]xi8> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi8>
+  llvm.return %1 : vector<[2]xi8>
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.unary producing scalable e8 results (one test per LMUL).
+// Each test checks that the op lowers to the @llvm.riscv.sf.vc.v.x intrinsic
+// (note: no ".se" suffix, unlike the ".ro" tests below) whose overload is
+// selected from the result type: vector<[N]xi8> -> nxvNi8. The call operands
+// are opcode=3, rs2=31, then the constant 0 and the i32 argument %arg0
+// (presumably scalar payload and VL — confirm against the VCIX op definition).
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define <vscale x 4 x i8> @unary_e8mf2(i32 %0) {
+// CHECK:   %2 = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 4 x i8> %2
+// CHECK: }
+llvm.func @unary_e8mf2(%arg0: i32) -> vector<[4]xi8> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi8>
+  llvm.return %1 : vector<[4]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 8 x i8> @unary_e8m1(i32 %0) {
+// CHECK:   %2 = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 8 x i8> %2
+// CHECK: }
+llvm.func @unary_e8m1(%arg0: i32) -> vector<[8]xi8> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi8>
+  llvm.return %1 : vector<[8]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 16 x i8> @unary_e8m2(i32 %0) {
+// CHECK:   %2 = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 16 x i8> %2
+// CHECK: }
+llvm.func @unary_e8m2(%arg0: i32) -> vector<[16]xi8> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi8>
+  llvm.return %1 : vector<[16]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 32 x i8> @unary_e8m4(i32 %0) {
+// CHECK:   %2 = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 32 x i8> %2
+// CHECK: }
+llvm.func @unary_e8m4(%arg0: i32) -> vector<[32]xi8> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[32]xi8>
+  llvm.return %1 : vector<[32]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 64 x i8> @unary_e8m8(i32 %0) {
+// CHECK:   %2 = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 64 x i8> %2
+// CHECK: }
+llvm.func @unary_e8m8(%arg0: i32) -> vector<[64]xi8> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[64]xi8>
+  llvm.return %1 : vector<[64]xi8>
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.unary.ro ("result omitted") with e16 element type: lowers to
+// the void, side-effecting @llvm.riscv.sf.vc.i.se.* intrinsic. Since there is
+// no vector result to derive the type from, the sew_lmul attribute selects
+// the intrinsic suffix; the mapping observed across these tests is
+//   7 -> e16mf4, 8 -> e16mf2, 9 -> e16m1, 10 -> e16m2, 11 -> e16m4, 12 -> e16m8.
+// The rd=30 / rs2=31 attributes become immediate i32 call operands
+// (i32 3 = opcode, i32 31, i32 30, i32 0, then %arg0).
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define void @unary_ro_e16mf4(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16mf4.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16mf4(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 7 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16mf2(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16mf2.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16mf2(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 8 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16m1(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16m1.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16m1(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 9 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16m2(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16m2.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16m2(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 10 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16m4(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16m4.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16m4(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 11 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16m8(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16m8.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16m8(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 12 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.unary producing scalable e16 results: same lowering pattern as
+// the e8 group above, with the intrinsic overload derived from the result
+// type (vector<[N]xi16> -> nxvNi16).
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define <vscale x 1 x i16> @unary_e16mf4(i32 %0) {
+// CHECK:   %2 = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 1 x i16> %2
+// CHECK: }
+llvm.func @unary_e16mf4(%arg0: i32) -> vector<[1]xi16> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi16>
+  llvm.return %1 : vector<[1]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 2 x i16> @unary_e16mf2(i32 %0) {
+// CHECK:   %2 = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 2 x i16> %2
+// CHECK: }
+llvm.func @unary_e16mf2(%arg0: i32) -> vector<[2]xi16> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi16>
+  llvm.return %1 : vector<[2]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 4 x i16> @unary_e16m1(i32 %0) {
+// CHECK:   %2 = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 4 x i16> %2
+// CHECK: }
+llvm.func @unary_e16m1(%arg0: i32) -> vector<[4]xi16> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi16>
+  llvm.return %1 : vector<[4]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 8 x i16> @unary_e16m2(i32 %0) {
+// CHECK:   %2 = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 8 x i16> %2
+// CHECK: }
+llvm.func @unary_e16m2(%arg0: i32) -> vector<[8]xi16> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi16>
+  llvm.return %1 : vector<[8]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 16 x i16> @unary_e16m4(i32 %0) {
+// CHECK:   %2 = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 16 x i16> %2
+// CHECK: }
+llvm.func @unary_e16m4(%arg0: i32) -> vector<[16]xi16> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi16>
+  llvm.return %1 : vector<[16]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 32 x i16> @unary_e16m8(i32 %0) {
+// CHECK:   %2 = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 32 x i16> %2
+// CHECK: }
+llvm.func @unary_e16m8(%arg0: i32) -> vector<[32]xi16> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[32]xi16>
+  llvm.return %1 : vector<[32]xi16>
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.unary.ro with e32 element type. Observed sew_lmul -> intrinsic
+// suffix mapping: 13 -> e32mf2, 14 -> e32m1, 15 -> e32m2, 16 -> e32m4,
+// 17 -> e32m8 (continues the enum started in the e16 group above).
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define void @unary_ro_e32mf2(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32mf2.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32mf2(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 13 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e32m1(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32m1.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32m1(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 14 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e32m2(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32m2.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32m2(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 15 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e32m4(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32m4.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32m4(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 16 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e32m8(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32m8.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32m8(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 17 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.unary producing scalable e32 results: the result type
+// vector<[N]xi32> picks the nxvNi32 overload of @llvm.riscv.sf.vc.v.x.
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define <vscale x 1 x i32> @unary_e32mf2(i32 %0) {
+// CHECK:   %2 = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 1 x i32> %2
+// CHECK: }
+llvm.func @unary_e32mf2(%arg0: i32) -> vector<[1]xi32> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi32>
+  llvm.return %1 : vector<[1]xi32>
+}
+
+// CHECK-LABEL: define <vscale x 2 x i32> @unary_e32m1(i32 %0) {
+// CHECK:   %2 = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 2 x i32> %2
+// CHECK: }
+llvm.func @unary_e32m1(%arg0: i32) -> vector<[2]xi32> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi32>
+  llvm.return %1 : vector<[2]xi32>
+}
+
+// CHECK-LABEL: define <vscale x 4 x i32> @unary_e32m2(i32 %0) {
+// CHECK:   %2 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 4 x i32> %2
+// CHECK: }
+llvm.func @unary_e32m2(%arg0: i32) -> vector<[4]xi32> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi32>
+  llvm.return %1 : vector<[4]xi32>
+}
+
+// CHECK-LABEL: define <vscale x 8 x i32> @unary_e32m4(i32 %0) {
+// CHECK:   %2 = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 8 x i32> %2
+// CHECK: }
+llvm.func @unary_e32m4(%arg0: i32) -> vector<[8]xi32> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi32>
+  llvm.return %1 : vector<[8]xi32>
+}
+
+// CHECK-LABEL: define <vscale x 16 x i32> @unary_e32m8(i32 %0) {
+// CHECK:   %2 = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 16 x i32> %2
+// CHECK: }
+llvm.func @unary_e32m8(%arg0: i32) -> vector<[16]xi32> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[16]xi32>
+  llvm.return %1 : vector<[16]xi32>
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.unary.ro with e64 element type. Observed sew_lmul -> intrinsic
+// suffix mapping: 18 -> e64m1, 19 -> e64m2, 20 -> e64m4, 21 -> e64m8.
+// (No fractional-LMUL e64 variants exist, matching RVV rules.)
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define void @unary_ro_e64m1(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e64m1.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e64m1(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 18 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e64m2(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e64m2.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e64m2(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 19 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e64m4(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e64m4.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e64m4(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 20 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e64m8(i32 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e64m8.i32.i32.i32(i32 3, i32 31, i32 30, i32 0, i32 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e64m8(%arg0: i32) {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i32, rd = 30 : i32, rs2 = 31 : i32, sew_lmul = 21 : i32}> : (i32, i32) -> ()
+  llvm.return
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.unary producing scalable e64 results: vector<[N]xi64> picks the
+// nxvNi64 overload of @llvm.riscv.sf.vc.v.x.
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define <vscale x 1 x i64> @unary_e64m1(i32 %0) {
+// CHECK:   %2 = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.x.nxv1i64.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 1 x i64> %2
+// CHECK: }
+llvm.func @unary_e64m1(%arg0: i32) -> vector<[1]xi64> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[1]xi64>
+  llvm.return %1 : vector<[1]xi64>
+}
+
+// CHECK-LABEL: define <vscale x 2 x i64> @unary_e64m2(i32 %0) {
+// CHECK:   %2 = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.x.nxv2i64.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 2 x i64> %2
+// CHECK: }
+llvm.func @unary_e64m2(%arg0: i32) -> vector<[2]xi64> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[2]xi64>
+  llvm.return %1 : vector<[2]xi64>
+}
+
+// CHECK-LABEL: define <vscale x 4 x i64> @unary_e64m4(i32 %0) {
+// CHECK:   %2 = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.x.nxv4i64.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 4 x i64> %2
+// CHECK: }
+llvm.func @unary_e64m4(%arg0: i32) -> vector<[4]xi64> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[4]xi64>
+  llvm.return %1 : vector<[4]xi64>
+}
+
+// CHECK-LABEL: define <vscale x 8 x i64> @unary_e64m8(i32 %0) {
+// CHECK:   %2 = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.x.nxv8i64.i32.i32.i32(i32 3, i32 31, i32 0, i32 %0)
+// CHECK:   ret <vscale x 8 x i64> %2
+// CHECK: }
+llvm.func @unary_e64m8(%arg0: i32) -> vector<[8]xi64> {
+  %0 = llvm.mlir.constant(0 : i32) : i32
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i32, rs2 = 31 : i32}> : (i32, i32) -> vector<[8]xi64>
+  llvm.return %1 : vector<[8]xi64>
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.binary[.ro] with scalable vectors. Four operand forms are
+// exercised, distinguished only by the type of the first operand:
+//   vv -> sf.vc.*.vv (vector rs1), xv -> sf.vc.*.xv (i32 GPR rs1),
+//   fv -> sf.vc.*.fv (f32 FPR rs1, note opcode=1 here),
+//   iv -> also sf.vc.*.xv: the zext'd i5 constant is folded into a literal
+//         immediate operand (i32 1) of the xv intrinsic.
+// ".ro" variants carry an rd attribute (passed as i32 30) and lower to the
+// void side-effecting intrinsic; the value-returning form drops rd and
+// returns the result. Note the operand order swap in the lowered call:
+// the vector operand %1 precedes the scalar/vector rs1 %0.
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define void @binary_vv_ro(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.vv.se.i32.nxv4f32.nxv4f32.i32(i32 3, i32 30, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i32 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @binary_vv_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: i32) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i32, rd = 30 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_vv(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK:   %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.i32.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i32 %2)
+// CHECK:   ret <vscale x 4 x float> %4
+// CHECK: }
+llvm.func @binary_vv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: i32) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// CHECK-LABEL: define void @binary_xv_ro(i32 %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xv.se.i32.nxv4f32.i32.i32(i32 3, i32 30, <vscale x 4 x float> %1, i32 %0, i32 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @binary_xv_ro(%arg0: i32, %arg1: vector<[4]xf32>, %arg2: i32) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<[4]xf32>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_xv(i32 %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK:   %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> %1, i32 %0, i32 %2)
+// CHECK:   ret <vscale x 4 x float> %4
+// CHECK: }
+llvm.func @binary_xv(%arg0: i32, %arg1: vector<[4]xf32>, %arg2: i32) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// CHECK-LABEL: define void @binary_fv_ro(float %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.fv.se.i32.nxv4f32.f32.i32(i32 1, i32 30, <vscale x 4 x float> %1, float %0, i32 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @binary_fv_ro(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: i32) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1, %arg2) <{opcode = 1 : i32, rd = 30 : i32}> : (f32, vector<[4]xf32>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_fv(float %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK:   %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.i32.nxv4f32.f32.i32(i32 1, <vscale x 4 x float> %1, float %0, i32 %2)
+// CHECK:   ret <vscale x 4 x float> %4
+// CHECK: }
+llvm.func @binary_fv(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: i32) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1, %arg2) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// CHECK-LABEL: define void @binary_iv_ro(<vscale x 4 x float> %0, i32 %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.xv.se.i32.nxv4f32.i32.i32(i32 3, i32 30, <vscale x 4 x float> %0, i32 1, i32 %1)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @binary_iv_ro(%arg0: vector<[4]xf32>, %arg1: i32) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  "vcix.intrin.binary.ro"(%1, %arg0, %arg1) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<[4]xf32>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_iv(<vscale x 4 x float> %0, i32 %1) {
+// CHECK:   %3 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> %0, i32 1, i32 %1)
+// CHECK:   ret <vscale x 4 x float> %3
+// CHECK: }
+llvm.func @binary_iv(%arg0: vector<[4]xf32>, %arg1: i32) -> vector<[4]xf32> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  %2 = "vcix.intrin.binary"(%1, %arg0, %arg1) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, i32) -> vector<[4]xf32>
+  llvm.return %2 : vector<[4]xf32>
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.ternary[.ro]: three data operands plus the trailing i32.
+// Forms vvv / xvv / fvv / ivv mirror the binary group (ivv again folds the
+// zext'd i5 constant into a literal i32 1 on the xvv intrinsic).
+// Unlike binary.ro, the ".ro" ternary ops carry no rd attribute — the lowered
+// void sf.vc.*vv.se call has no rd operand, only opcode + the three operands.
+// Lowered operand order is again reversed relative to the MLIR op
+// (%2, %1, %0).
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define void @ternary_vvv_ro(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4f32.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x float> %2, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i32 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @ternary_vvv_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i32) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @ternary_vvv(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) {
+// CHECK:   %5 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.i32.nxv4f32.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x float> %2, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i32 %3)
+// CHECK:   ret <vscale x 4 x float> %5
+// CHECK: }
+llvm.func @ternary_vvv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i32) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// CHECK-LABEL: define void @ternary_xvv_ro(i32 %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4f32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> %2, <vscale x 4 x float> %1, i32 %0, i32 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @ternary_xvv_ro(%arg0: i32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i32) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @ternary_xvv(i32 %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) {
+// CHECK:   %5 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.i32.nxv4f32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> %2, <vscale x 4 x float> %1, i32 %0, i32 %3)
+// CHECK:   ret <vscale x 4 x float> %5
+// CHECK: }
+llvm.func @ternary_xvv(%arg0: i32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i32) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// CHECK-LABEL: define void @ternary_fvv_ro(float %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4f32.nxv4f32.f32.i32(i32 1, <vscale x 4 x float> %2, <vscale x 4 x float> %1, float %0, i32 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @ternary_fvv_ro(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i32) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @ternary_fvv(float %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) {
+// CHECK:   %5 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.i32.nxv4f32.nxv4f32.f32.i32(i32 1, <vscale x 4 x float> %2, <vscale x 4 x float> %1, float %0, i32 %3)
+// CHECK:   ret <vscale x 4 x float> %5
+// CHECK: }
+llvm.func @ternary_fvv(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i32) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// CHECK-LABEL: define void @ternary_ivv_ro(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4f32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i32 1, i32 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @ternary_ivv_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: i32) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  "vcix.intrin.ternary.ro"(%1, %arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @ternary_ivv(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK:   %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.i32.nxv4f32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i32 1, i32 %2)
+// CHECK:   ret <vscale x 4 x float> %4
+// CHECK: }
+llvm.func @ternary_ivv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: i32) -> vector<[4]xf32> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  %2 = "vcix.intrin.ternary"(%1, %arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+  llvm.return %2 : vector<[4]xf32>
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.wide.ternary[.ro]: widening forms where the accumulator/result
+// vector is double-width (f32 sources, f64 destination), lowering to
+// sf.vc.*vw.se / sf.vc.v.*vw.se with an nxv4f64 overload slot.
+// NOTE(review): in the fvw/ivv value-returning tests below the function
+// returns the *input* f64 vector rather than the intrinsic result, so the
+// CHECK "ret" lines reference %2/%1 while the call result (%5/%4) is only
+// checked for existence — presumably intentional, to show the side-effecting
+// call is not eliminated; confirm with the author.
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define void @wide_ternary_vvw_ro(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i32 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4f64.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x double> %2, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i32 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @wide_ternary_vvw_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i32) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x double> @wide_ternary_vvw(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i32 %3) {
+// CHECK:   %5 = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.i32.nxv4f64.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x double> %2, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i32 %3)
+// CHECK:   ret <vscale x 4 x double> %5
+// CHECK: }
+llvm.func @wide_ternary_vvw(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i32) -> vector<[4]xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+  llvm.return %0 : vector<[4]xf64>
+}
+
+// CHECK-LABEL: define void @wide_ternary_xvw_ro(i32 %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i32 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4f64.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> %2, <vscale x 4 x float> %1, i32 %0, i32 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @wide_ternary_xvw_ro(%arg0: i32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i32) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x double> @wide_ternary_xvw(i32 %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i32 %3) {
+// CHECK:   %5 = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f64.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> %2, <vscale x 4 x float> %1, i32 %0, i32 %3)
+// CHECK:   ret <vscale x 4 x double> %5
+// CHECK: }
+llvm.func @wide_ternary_xvw(%arg0: i32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i32) -> vector<[4]xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+  llvm.return %0 : vector<[4]xf64>
+}
+
+// CHECK-LABEL: define void @wide_ternary_fvw_ro(float %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i32 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.fvw.se.i32.nxv4f64.nxv4f32.f32.i32(i32 1, <vscale x 4 x double> %2, <vscale x 4 x float> %1, float %0, i32 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @wide_ternary_fvw_ro(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i32) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x double> @wide_ternary_fvw(float %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i32 %3) {
+// CHECK:   %5 = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.i32.nxv4f64.nxv4f32.f32.i32(i32 1, <vscale x 4 x double> %2, <vscale x 4 x float> %1, float %0, i32 %3)
+// CHECK:   ret <vscale x 4 x double> %2
+// CHECK: }
+llvm.func @wide_ternary_fvw(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i32) -> vector<[4]xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 1 : i32}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+  llvm.return %arg2 : vector<[4]xf64>
+}
+
+// CHECK-LABEL: define void @wide_ternary_ivw_ro(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i32 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4f64.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> %1, <vscale x 4 x float> %0, i32 1, i32 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @wide_ternary_ivw_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf64>, %arg2: i32) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  "vcix.intrin.wide.ternary.ro"(%1, %arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> ()
+  llvm.return
+}
+
+// NOTE(review): name says "_ivv" but this exercises the wide ternary
+// immediate form — "_ivw" would match the sibling @wide_ternary_ivw_ro above.
+// CHECK-LABEL: define <vscale x 4 x double> @wide_ternary_ivv(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i32 %2) {
+// CHECK:   %4 = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f64.nxv4f32.i32.i32(i32 3, <vscale x 4 x double> %1, <vscale x 4 x float> %0, i32 1, i32 %2)
+// CHECK:   ret <vscale x 4 x double> %1
+// CHECK: }
+llvm.func @wide_ternary_ivv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf64>, %arg2: i32) -> vector<[4]xf64> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  %2 = "vcix.intrin.wide.ternary"(%1, %arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<[4]xf32>, vector<[4]xf64>, i32) -> vector<[4]xf64>
+  llvm.return %arg1 : vector<[4]xf64>
+}
+
+// ---------------------------------------------------------------------------
+// vcix.intrin.binary[.ro] with FIXED-length vectors (vector<4xf32>): the
+// intrinsic overload becomes v4f32 instead of nxv4f32, and — since these ops
+// take no runtime VL operand — a literal i32 4 (the static vector length)
+// appears as the trailing call operand (presumably synthesized as the VL;
+// confirm against the lowering code). Forms vv / xv / fv / iv mirror the
+// scalable binary group above.
+// ---------------------------------------------------------------------------
+// CHECK-LABEL: define void @fixed_binary_vv_ro(<4 x float> %0, <4 x float> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.vv.se.i32.v4f32.v4f32.i32(i32 3, i32 30, <4 x float> %1, <4 x float> %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_binary_vv_ro(%arg0: vector<4xf32>, %arg1: vector<4xf32>) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1) <{opcode = 3 : i32, rd = 30 : i32}> : (vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_binary_vv(<4 x float> %0, <4 x float> %1) {
+// CHECK:   %3 = call <4 x float> @llvm.riscv.sf.vc.v.vv.se.v4f32.i32.v4f32.v4f32.i32(i32 3, <4 x float> %1, <4 x float> %0, i32 4)
+// CHECK:   ret <4 x float> %3
+// CHECK: }
+llvm.func @fixed_binary_vv(%arg0: vector<4xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// CHECK-LABEL: define void @fixed_binary_xv_ro(i32 %0, <4 x float> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.xv.se.i32.v4f32.i32.i32(i32 3, i32 30, <4 x float> %1, i32 %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_binary_xv_ro(%arg0: i32, %arg1: vector<4xf32>) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_binary_xv(i32 %0, <4 x float> %1) {
+// CHECK:   %3 = call <4 x float> @llvm.riscv.sf.vc.v.xv.se.v4f32.i32.v4f32.i32.i32(i32 3, <4 x float> %1, i32 %0, i32 4)
+// CHECK:   ret <4 x float> %3
+// CHECK: }
+llvm.func @fixed_binary_xv(%arg0: i32, %arg1: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1) <{opcode = 3 : i32}> : (i32, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// CHECK-LABEL: define void @fixed_binary_fv_ro(float %0, <4 x float> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.fv.se.i32.v4f32.f32.i32(i32 1, i32 30, <4 x float> %1, float %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_binary_fv_ro(%arg0: f32, %arg1: vector<4xf32>) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1) <{opcode = 1 : i32, rd = 30 : i32}> : (f32, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_binary_fv(float %0, <4 x float> %1) {
+// CHECK:   %3 = call <4 x float> @llvm.riscv.sf.vc.v.fv.se.v4f32.i32.v4f32.f32.i32(i32 1, <4 x float> %1, float %0, i32 4)
+// CHECK:   ret <4 x float> %3
+// CHECK: }
+llvm.func @fixed_binary_fv(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1) <{opcode = 1 : i32}> : (f32, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// CHECK-LABEL: define void @fixed_binary_iv_ro(<4 x float> %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.xv.se.i32.v4f32.i32.i32(i32 3, i32 30, <4 x float> %0, i32 1, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_binary_iv_ro(%arg0: vector<4xf32>) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  "vcix.intrin.binary.ro"(%1, %arg0) <{opcode = 3 : i32, rd = 30 : i32}> : (i32, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_binary_iv(<4 x float> %0) {
+// CHECK:   %2 = call <4 x float> @llvm.riscv.sf.vc.v.xv.se.v4f32.i32.v4f32.i32.i32(i32 3, <4 x float> %0, i32 1, i32 4)
+// CHECK:   ret <4 x float> %2
+// CHECK: }
+llvm.func @fixed_binary_iv(%arg0: vector<4xf32>) -> vector<4xf32> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  %2 = "vcix.intrin.binary"(%1, %arg0) <{opcode = 3 : i32}> : (i32, vector<4xf32>) -> vector<4xf32>
+  llvm.return %2 : vector<4xf32>
+}
+
+//-- Ternary VCIX intrinsics on fixed-length vectors (vvv/xvv/fvv/ivv operand
+//-- forms). Note the lowering reverses the operand order relative to the MLIR
+//-- op: (%arg0, %arg1, %arg2) lowers to (..., %2, %1, %0, vl).
+// CHECK-LABEL: define void @fixed_ternary_vvv_ro(<4 x float> %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.vvv.se.i32.v4f32.v4f32.v4f32.i32(i32 3, <4 x float> %2, <4 x float> %1, <4 x float> %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_ternary_vvv_ro(%arg0: vector<4xf32>, %arg1: vector<4xf32>, %arg2: vector<4xf32>) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_ternary_vvv(<4 x float> %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   %4 = call <4 x float> @llvm.riscv.sf.vc.v.vvv.se.v4f32.i32.v4f32.v4f32.v4f32.i32(i32 3, <4 x float> %2, <4 x float> %1, <4 x float> %0, i32 4)
+// CHECK:   ret <4 x float> %4
+// CHECK: }
+llvm.func @fixed_ternary_vvv(%arg0: vector<4xf32>, %arg1: vector<4xf32>, %arg2: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// CHECK-LABEL: define void @fixed_ternary_xvv_ro(i32 %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvv.se.i32.v4f32.v4f32.i32.i32(i32 3, <4 x float> %2, <4 x float> %1, i32 %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_ternary_xvv_ro(%arg0: i32, %arg1: vector<4xf32>, %arg2: vector<4xf32>) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_ternary_xvv(i32 %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   %4 = call <4 x float> @llvm.riscv.sf.vc.v.xvv.se.v4f32.i32.v4f32.v4f32.i32.i32(i32 3, <4 x float> %2, <4 x float> %1, i32 %0, i32 4)
+// CHECK:   ret <4 x float> %4
+// CHECK: }
+llvm.func @fixed_ternary_xvv(%arg0: i32, %arg1: vector<4xf32>, %arg2: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// CHECK-LABEL: define void @fixed_ternary_fvv_ro(float %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.fvv.se.i32.v4f32.v4f32.f32.i32(i32 1, <4 x float> %2, <4 x float> %1, float %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_ternary_fvv_ro(%arg0: f32, %arg1: vector<4xf32>, %arg2: vector<4xf32>) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_ternary_fvv(float %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   %4 = call <4 x float> @llvm.riscv.sf.vc.v.fvv.se.v4f32.i32.v4f32.v4f32.f32.i32(i32 1, <4 x float> %2, <4 x float> %1, float %0, i32 4)
+// CHECK:   ret <4 x float> %4
+// CHECK: }
+llvm.func @fixed_ternary_fvv(%arg0: f32, %arg1: vector<4xf32>, %arg2: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+//-- Immediate form: a zero-extended i5 constant is expected to fold into the
+//-- immediate operand of the xvv intrinsic (i32 1 below).
+// CHECK-LABEL: define void @fixed_ternary_ivv_ro(<4 x float> %0, <4 x float> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvv.se.i32.v4f32.v4f32.i32.i32(i32 3, <4 x float> %1, <4 x float> %0, i32 1, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_ternary_ivv_ro(%arg0: vector<4xf32>, %arg1: vector<4xf32>) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  "vcix.intrin.ternary.ro"(%1, %arg0, %arg1) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_ternary_ivv(<4 x float> %0, <4 x float> %1) {
+// CHECK:   %3 = call <4 x float> @llvm.riscv.sf.vc.v.xvv.se.v4f32.i32.v4f32.v4f32.i32.i32(i32 3, <4 x float> %1, <4 x float> %0, i32 1, i32 4)
+// CHECK:   ret <4 x float> %3
+// CHECK: }
+llvm.func @fixed_ternary_ivv(%arg0: vector<4xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  %2 = "vcix.intrin.ternary"(%1, %arg0, %arg1) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %2 : vector<4xf32>
+}
+
+//-- Widening ternary VCIX intrinsics on fixed-length vectors: the accumulator
+//-- operand/result is a double-width element vector (f32 sources, f64 acc),
+//-- lowering to the *vw intrinsic family.
+// CHECK-LABEL: define void @fixed_wide_ternary_vvw_ro(<4 x float> %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.vvw.se.i32.v4f64.v4f32.v4f32.i32(i32 3, <4 x double> %2, <4 x float> %1, <4 x float> %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_wide_ternary_vvw_ro(%arg0: vector<4xf32>, %arg1: vector<4xf32>, %arg2: vector<4xf64>) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x double> @fixed_wide_ternary_vvw(<4 x float> %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   %4 = call <4 x double> @llvm.riscv.sf.vc.v.vvw.se.v4f64.i32.v4f64.v4f32.v4f32.i32(i32 3, <4 x double> %2, <4 x float> %1, <4 x float> %0, i32 4)
+// CHECK:   ret <4 x double> %4
+// CHECK: }
+llvm.func @fixed_wide_ternary_vvw(%arg0: vector<4xf32>, %arg1: vector<4xf32>, %arg2: vector<4xf64>) -> vector<4xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+  llvm.return %0 : vector<4xf64>
+}
+
+// CHECK-LABEL: define void @fixed_wide_ternary_xvw_ro(i32 %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvw.se.i32.v4f64.v4f32.i32.i32(i32 3, <4 x double> %2, <4 x float> %1, i32 %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_wide_ternary_xvw_ro(%arg0: i32, %arg1: vector<4xf32>, %arg2: vector<4xf64>) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x double> @fixed_wide_ternary_xvw(i32 %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   %4 = call <4 x double> @llvm.riscv.sf.vc.v.xvw.se.v4f64.i32.v4f64.v4f32.i32.i32(i32 3, <4 x double> %2, <4 x float> %1, i32 %0, i32 4)
+// CHECK:   ret <4 x double> %4
+// CHECK: }
+llvm.func @fixed_wide_ternary_xvw(%arg0: i32, %arg1: vector<4xf32>, %arg2: vector<4xf64>) -> vector<4xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+  llvm.return %0 : vector<4xf64>
+}
+
+// CHECK-LABEL: define void @fixed_wide_ternary_fvw_ro(float %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.fvw.se.i32.v4f64.v4f32.f32.i32(i32 1, <4 x double> %2, <4 x float> %1, float %0, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_wide_ternary_fvw_ro(%arg0: f32, %arg1: vector<4xf32>, %arg2: vector<4xf64>) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf64>) -> ()
+  llvm.return
+}
+
+// Return the intrinsic result (not the pass-through accumulator) so the test
+// verifies the lowering of the produced SSA value, matching the other
+// result-producing tests in this file (fixed_ternary_fvv, fixed_wide_ternary_vvw, ...).
+// CHECK-LABEL: define <4 x double> @fixed_wide_ternary_fvw(float %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   %4 = call <4 x double> @llvm.riscv.sf.vc.v.fvw.se.v4f64.i32.v4f64.v4f32.f32.i32(i32 1, <4 x double> %2, <4 x float> %1, float %0, i32 4)
+// CHECK:   ret <4 x double> %4
+// CHECK: }
+llvm.func @fixed_wide_ternary_fvw(%arg0: f32, %arg1: vector<4xf32>, %arg2: vector<4xf64>) -> vector<4xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2) <{opcode = 1 : i32}> : (f32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+  llvm.return %0 : vector<4xf64>
+}
+
+//-- Widening ternary, immediate form: the zero-extended i5 constant is
+//-- expected to fold into the immediate operand of the xvw intrinsic.
+// CHECK-LABEL: define void @fixed_wide_ternary_ivw_ro(<4 x float> %0, <4 x double> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvw.se.i32.v4f64.v4f32.i32.i32(i32 3, <4 x double> %1, <4 x float> %0, i32 1, i32 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_wide_ternary_ivw_ro(%arg0: vector<4xf32>, %arg1: vector<4xf64>) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  "vcix.intrin.wide.ternary.ro"(%1, %arg0, %arg1) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> ()
+  llvm.return
+}
+
+// Return the intrinsic result (not the pass-through accumulator) so the test
+// verifies the lowering of the produced SSA value, consistent with
+// fixed_ternary_ivv above.
+// CHECK-LABEL: define <4 x double> @fixed_wide_ternary_ivv(<4 x float> %0, <4 x double> %1) {
+// CHECK:   %3 = call <4 x double> @llvm.riscv.sf.vc.v.xvw.se.v4f64.i32.v4f64.v4f32.i32.i32(i32 3, <4 x double> %1, <4 x float> %0, i32 1, i32 4)
+// CHECK:   ret <4 x double> %3
+// CHECK: }
+llvm.func @fixed_wide_ternary_ivv(%arg0: vector<4xf32>, %arg1: vector<4xf64>) -> vector<4xf64> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i32
+  %2 = "vcix.intrin.wide.ternary"(%1, %arg0, %arg1) <{opcode = 3 : i32}> : (i32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+  llvm.return %2 : vector<4xf64>
+}
diff --git a/mlir/test/Target/LLVMIR/vcix-rv64.mlir b/mlir/test/Target/LLVMIR/vcix-rv64.mlir
new file mode 100644
index 0000000000000..81cbc8c8887f7
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/vcix-rv64.mlir
@@ -0,0 +1,897 @@
+// RUN: mlir-translate --mlir-to-llvmir %s | FileCheck %s
+
+//-- Unary ".ro" tests over every e8 SEW/LMUL combination (sew_lmul values
+//-- 0..6 map to e8mf8..e8m8). With no vector operand or result, the shape is
+//-- carried solely by the sew_lmul attribute, which selects the intrinsic's
+//-- eNmM mangling suffix.
+// CHECK-LABEL: define void @unary_ro_e8mf8(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8mf8.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8mf8(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 0 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8mf4(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8mf4.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8mf4(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 1 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8mf2(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8mf2.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8mf2(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 2 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8m1(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8m1.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8m1(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 3 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8m2(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8m2.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8m2(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 4 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8m4(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8m4.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8m4(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 5 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e8m8(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e8m8.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e8m8(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 6 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+//-- Unary result-producing tests for e8: the scalable result type
+//-- vector<[N]xi8> (N = 1..64, i.e. e8mf8..e8m8) determines the nxvNi8
+//-- intrinsic mangling; no sew_lmul attribute is needed.
+// CHECK-LABEL: define <vscale x 1 x i8> @unary_e8mf8(i64 %0) {
+// CHECK:   %2 = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 1 x i8> %2
+// CHECK: }
+llvm.func @unary_e8mf8(%arg0: i64) -> vector<[1]xi8> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi8>
+  llvm.return %1 : vector<[1]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 2 x i8> @unary_e8mf4(i64 %0) {
+// CHECK:   %2 = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 2 x i8> %2
+// CHECK: }
+llvm.func @unary_e8mf4(%arg0: i64) -> vector<[2]xi8> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi8>
+  llvm.return %1 : vector<[2]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 4 x i8> @unary_e8mf2(i64 %0) {
+// CHECK:   %2 = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 4 x i8> %2
+// CHECK: }
+llvm.func @unary_e8mf2(%arg0: i64) -> vector<[4]xi8> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi8>
+  llvm.return %1 : vector<[4]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 8 x i8> @unary_e8m1(i64 %0) {
+// CHECK:   %2 = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 8 x i8> %2
+// CHECK: }
+llvm.func @unary_e8m1(%arg0: i64) -> vector<[8]xi8> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi8>
+  llvm.return %1 : vector<[8]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 16 x i8> @unary_e8m2(i64 %0) {
+// CHECK:   %2 = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 16 x i8> %2
+// CHECK: }
+llvm.func @unary_e8m2(%arg0: i64) -> vector<[16]xi8> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi8>
+  llvm.return %1 : vector<[16]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 32 x i8> @unary_e8m4(i64 %0) {
+// CHECK:   %2 = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 32 x i8> %2
+// CHECK: }
+llvm.func @unary_e8m4(%arg0: i64) -> vector<[32]xi8> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[32]xi8>
+  llvm.return %1 : vector<[32]xi8>
+}
+
+// CHECK-LABEL: define <vscale x 64 x i8> @unary_e8m8(i64 %0) {
+// CHECK:   %2 = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 64 x i8> %2
+// CHECK: }
+llvm.func @unary_e8m8(%arg0: i64) -> vector<[64]xi8> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[64]xi8>
+  llvm.return %1 : vector<[64]xi8>
+}
+
+//-- Unary ".ro" tests for e16 (sew_lmul values 7..12 map to e16mf4..e16m8;
+//-- e16 has no mf8 combination).
+// CHECK-LABEL: define void @unary_ro_e16mf4(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16mf4.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16mf4(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 7 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16mf2(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16mf2.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16mf2(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 8 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16m1(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16m1.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16m1(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 9 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16m2(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16m2.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16m2(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 10 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16m4(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16m4.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16m4(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 11 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e16m8(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e16m8.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e16m8(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 12 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+//-- Unary result-producing tests for e16: the scalable vector<[N]xi16>
+//-- result type (N = 1..32, i.e. e16mf4..e16m8) drives the nxvNi16 mangling.
+// CHECK-LABEL: define <vscale x 1 x i16> @unary_e16mf4(i64 %0) {
+// CHECK:   %2 = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 1 x i16> %2
+// CHECK: }
+llvm.func @unary_e16mf4(%arg0: i64) -> vector<[1]xi16> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi16>
+  llvm.return %1 : vector<[1]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 2 x i16> @unary_e16mf2(i64 %0) {
+// CHECK:   %2 = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 2 x i16> %2
+// CHECK: }
+llvm.func @unary_e16mf2(%arg0: i64) -> vector<[2]xi16> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi16>
+  llvm.return %1 : vector<[2]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 4 x i16> @unary_e16m1(i64 %0) {
+// CHECK:   %2 = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 4 x i16> %2
+// CHECK: }
+llvm.func @unary_e16m1(%arg0: i64) -> vector<[4]xi16> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi16>
+  llvm.return %1 : vector<[4]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 8 x i16> @unary_e16m2(i64 %0) {
+// CHECK:   %2 = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 8 x i16> %2
+// CHECK: }
+llvm.func @unary_e16m2(%arg0: i64) -> vector<[8]xi16> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi16>
+  llvm.return %1 : vector<[8]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 16 x i16> @unary_e16m4(i64 %0) {
+// CHECK:   %2 = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 16 x i16> %2
+// CHECK: }
+llvm.func @unary_e16m4(%arg0: i64) -> vector<[16]xi16> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi16>
+  llvm.return %1 : vector<[16]xi16>
+}
+
+// CHECK-LABEL: define <vscale x 32 x i16> @unary_e16m8(i64 %0) {
+// CHECK:   %2 = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 32 x i16> %2
+// CHECK: }
+llvm.func @unary_e16m8(%arg0: i64) -> vector<[32]xi16> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[32]xi16>
+  llvm.return %1 : vector<[32]xi16>
+}
+
+//-- Unary ".ro" tests for e32 (sew_lmul values 13..17 map to e32mf2..e32m8).
+// CHECK-LABEL: define void @unary_ro_e32mf2(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32mf2.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32mf2(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 13 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e32m1(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32m1.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32m1(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 14 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e32m2(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32m2.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32m2(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 15 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e32m4(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32m4.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32m4(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 16 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e32m8(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e32m8.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e32m8(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 17 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+//-- Unary result-producing tests for e32: the scalable vector<[N]xi32>
+//-- result type (N = 1..16, i.e. e32mf2..e32m8) drives the nxvNi32 mangling.
+// CHECK-LABEL: define <vscale x 1 x i32> @unary_e32mf2(i64 %0) {
+// CHECK:   %2 = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 1 x i32> %2
+// CHECK: }
+llvm.func @unary_e32mf2(%arg0: i64) -> vector<[1]xi32> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi32>
+  llvm.return %1 : vector<[1]xi32>
+}
+
+// CHECK-LABEL: define <vscale x 2 x i32> @unary_e32m1(i64 %0) {
+// CHECK:   %2 = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 2 x i32> %2
+// CHECK: }
+llvm.func @unary_e32m1(%arg0: i64) -> vector<[2]xi32> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi32>
+  llvm.return %1 : vector<[2]xi32>
+}
+
+// CHECK-LABEL: define <vscale x 4 x i32> @unary_e32m2(i64 %0) {
+// CHECK:   %2 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 4 x i32> %2
+// CHECK: }
+llvm.func @unary_e32m2(%arg0: i64) -> vector<[4]xi32> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi32>
+  llvm.return %1 : vector<[4]xi32>
+}
+
+// CHECK-LABEL: define <vscale x 8 x i32> @unary_e32m4(i64 %0) {
+// CHECK:   %2 = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 8 x i32> %2
+// CHECK: }
+llvm.func @unary_e32m4(%arg0: i64) -> vector<[8]xi32> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi32>
+  llvm.return %1 : vector<[8]xi32>
+}
+
+// CHECK-LABEL: define <vscale x 16 x i32> @unary_e32m8(i64 %0) {
+// CHECK:   %2 = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 16 x i32> %2
+// CHECK: }
+llvm.func @unary_e32m8(%arg0: i64) -> vector<[16]xi32> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[16]xi32>
+  llvm.return %1 : vector<[16]xi32>
+}
+
+//-- Unary ".ro" tests for e64 (sew_lmul values 18..21 map to e64m1..e64m8;
+//-- e64 has no fractional LMUL combinations).
+// CHECK-LABEL: define void @unary_ro_e64m1(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e64m1.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e64m1(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 18 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e64m2(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e64m2.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e64m2(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 19 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e64m4(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e64m4.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e64m4(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 20 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define void @unary_ro_e64m8(i64 %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.i.se.e64m8.i64.i64.i64(i64 3, i64 31, i64 30, i64 0, i64 %0)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @unary_ro_e64m8(%arg0: i64) {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  "vcix.intrin.unary.ro"(%0, %arg0) <{opcode = 3 : i64, rd = 30 : i64, rs2 = 31 : i64, sew_lmul = 21 : i32}> : (i64, i64) -> ()
+  llvm.return
+}
+
+//-- Unary result-producing tests for e64: use 64-bit element vectors
+//-- (vector<[N]xi64>, N = 1..8 for m1..m8) so the nxvNi64 manglings are
+//-- actually covered. The previous i32-element versions merely duplicated
+//-- the e32mf2..e32m4 cases above.
+// CHECK-LABEL: define <vscale x 1 x i64> @unary_e64m1(i64 %0) {
+// CHECK:   %2 = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.x.nxv1i64.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 1 x i64> %2
+// CHECK: }
+llvm.func @unary_e64m1(%arg0: i64) -> vector<[1]xi64> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[1]xi64>
+  llvm.return %1 : vector<[1]xi64>
+}
+
+// CHECK-LABEL: define <vscale x 2 x i64> @unary_e64m2(i64 %0) {
+// CHECK:   %2 = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.x.nxv2i64.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 2 x i64> %2
+// CHECK: }
+llvm.func @unary_e64m2(%arg0: i64) -> vector<[2]xi64> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[2]xi64>
+  llvm.return %1 : vector<[2]xi64>
+}
+
+// CHECK-LABEL: define <vscale x 4 x i64> @unary_e64m4(i64 %0) {
+// CHECK:   %2 = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.x.nxv4i64.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 4 x i64> %2
+// CHECK: }
+llvm.func @unary_e64m4(%arg0: i64) -> vector<[4]xi64> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[4]xi64>
+  llvm.return %1 : vector<[4]xi64>
+}
+
+// CHECK-LABEL: define <vscale x 8 x i64> @unary_e64m8(i64 %0) {
+// CHECK:   %2 = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.x.nxv8i64.i64.i64.i64(i64 3, i64 31, i64 0, i64 %0)
+// CHECK:   ret <vscale x 8 x i64> %2
+// CHECK: }
+llvm.func @unary_e64m8(%arg0: i64) -> vector<[8]xi64> {
+  %0 = llvm.mlir.constant(0 : i64) : i64
+  %1 = "vcix.intrin.unary"(%0, %arg0) <{opcode = 3 : i64, rs2 = 31 : i64}> : (i64, i64) -> vector<[8]xi64>
+  llvm.return %1 : vector<[8]xi64>
+}
+
+//-- Binary VCIX tests on scalable vectors: unlike the fixed-vector cases,
+//-- VL is a runtime i64 operand passed through to the intrinsic.
+// CHECK-LABEL: define void @binary_vv_ro(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.vv.se.i64.nxv4f32.nxv4f32.i64(i64 3, i64 30, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i64 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @binary_vv_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: i64) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i64, rd = 30 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_vv(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK:   %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.i64.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i64 %2)
+// CHECK:   ret <vscale x 4 x float> %4
+// CHECK: }
+llvm.func @binary_vv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: i64) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// CHECK-LABEL: define void @binary_xv_ro(i64 %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xv.se.i64.nxv4f32.i64.i64(i64 3, i64 30, <vscale x 4 x float> %1, i64 %0, i64 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @binary_xv_ro(%arg0: i64, %arg1: vector<[4]xf32>, %arg2: i64) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<[4]xf32>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_xv(i64 %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK:   %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i64.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> %1, i64 %0, i64 %2)
+// CHECK:   ret <vscale x 4 x float> %4
+// CHECK: }
+llvm.func @binary_xv(%arg0: i64, %arg1: vector<[4]xf32>, %arg2: i64) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, i64) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// CHECK-LABEL: define void @binary_fv_ro(float %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.fv.se.i64.nxv4f32.f32.i64(i64 1, i64 30, <vscale x 4 x float> %1, float %0, i64 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @binary_fv_ro(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: i64) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1, %arg2) <{opcode = 1 : i64, rd = 30 : i64}> : (f32, vector<[4]xf32>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_fv(float %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK:   %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.i64.nxv4f32.f32.i64(i64 1, <vscale x 4 x float> %1, float %0, i64 %2)
+// CHECK:   ret <vscale x 4 x float> %4
+// CHECK: }
+llvm.func @binary_fv(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: i64) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1, %arg2) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, i64) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+//-- Immediate form: the zero-extended i5 constant folds into the immediate
+//-- operand of the xv intrinsic (i64 1 below).
+// CHECK-LABEL: define void @binary_iv_ro(<vscale x 4 x float> %0, i64 %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.xv.se.i64.nxv4f32.i64.i64(i64 3, i64 30, <vscale x 4 x float> %0, i64 1, i64 %1)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @binary_iv_ro(%arg0: vector<[4]xf32>, %arg1: i64) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  "vcix.intrin.binary.ro"(%1, %arg0, %arg1) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<[4]xf32>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_iv(<vscale x 4 x float> %0, i64 %1) {
+// CHECK:   %3 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i64.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> %0, i64 1, i64 %1)
+// CHECK:   ret <vscale x 4 x float> %3
+// CHECK: }
+llvm.func @binary_iv(%arg0: vector<[4]xf32>, %arg1: i64) -> vector<[4]xf32> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  %2 = "vcix.intrin.binary"(%1, %arg0, %arg1) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, i64) -> vector<[4]xf32>
+  llvm.return %2 : vector<[4]xf32>
+}
+
+// Ternary VCIX ops on scalable vectors.  Note the operand order is reversed
+// in the lowered call relative to the MLIR op (arg2, arg1, arg0); the `.ro`
+// forms carry no `rd` attribute and lower to the void sf.vc.*vv.se intrinsics.
+// CHECK-LABEL: define void @ternary_vvv_ro(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4f32.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x float> %2, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i64 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @ternary_vvv_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i64) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @ternary_vvv(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) {
+// CHECK:   %5 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.i64.nxv4f32.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x float> %2, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i64 %3)
+// CHECK:   ret <vscale x 4 x float> %5
+// CHECK: }
+llvm.func @ternary_vvv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i64) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// Scalar (x-register) first operand variants of the ternary op.
+// CHECK-LABEL: define void @ternary_xvv_ro(i64 %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4f32.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> %2, <vscale x 4 x float> %1, i64 %0, i64 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @ternary_xvv_ro(%arg0: i64, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i64) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @ternary_xvv(i64 %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) {
+// CHECK:   %5 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.i64.nxv4f32.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> %2, <vscale x 4 x float> %1, i64 %0, i64 %3)
+// CHECK:   ret <vscale x 4 x float> %5
+// CHECK: }
+llvm.func @ternary_xvv(%arg0: i64, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i64) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// Float (f-register) first operand variants; opcode 1 selects the fvv forms.
+// CHECK-LABEL: define void @ternary_fvv_ro(float %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4f32.nxv4f32.f32.i64(i64 1, <vscale x 4 x float> %2, <vscale x 4 x float> %1, float %0, i64 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @ternary_fvv_ro(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i64) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @ternary_fvv(float %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) {
+// CHECK:   %5 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.i64.nxv4f32.nxv4f32.f32.i64(i64 1, <vscale x 4 x float> %2, <vscale x 4 x float> %1, float %0, i64 %3)
+// CHECK:   ret <vscale x 4 x float> %5
+// CHECK: }
+llvm.func @ternary_fvv(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf32>, %arg3: i64) -> vector<[4]xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+  llvm.return %0 : vector<[4]xf32>
+}
+
+// Immediate first operand: the i5 constant zero-extended to i64 becomes the
+// literal `i64 1` in the lowered call, reusing the xvv intrinsics.
+// CHECK-LABEL: define void @ternary_ivv_ro(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4f32.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i64 1, i64 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @ternary_ivv_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: i64) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  "vcix.intrin.ternary.ro"(%1, %arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x float> @ternary_ivv(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK:   %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.i64.nxv4f32.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i64 1, i64 %2)
+// CHECK:   ret <vscale x 4 x float> %4
+// CHECK: }
+llvm.func @ternary_ivv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: i64) -> vector<[4]xf32> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  %2 = "vcix.intrin.ternary"(%1, %arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+  llvm.return %2 : vector<[4]xf32>
+}
+
+// Widening ternary ops: the accumulator/result element type is double the
+// input width (f32 inputs, f64 third operand/result), lowering to the
+// sf.vc.*vw.se intrinsic family.
+// CHECK-LABEL: define void @wide_ternary_vvw_ro(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i64 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.vvw.se.i64.nxv4f64.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x double> %2, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i64 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @wide_ternary_vvw_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i64) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x double> @wide_ternary_vvw(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i64 %3) {
+// CHECK:   %5 = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.i64.nxv4f64.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x double> %2, <vscale x 4 x float> %1, <vscale x 4 x float> %0, i64 %3)
+// CHECK:   ret <vscale x 4 x double> %5
+// CHECK: }
+llvm.func @wide_ternary_vvw(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i64) -> vector<[4]xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+  llvm.return %0 : vector<[4]xf64>
+}
+
+// Widening ternary with a scalar (x-register) first operand.
+// CHECK-LABEL: define void @wide_ternary_xvw_ro(i64 %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i64 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvw.se.i64.nxv4f64.nxv4f32.i64.i64(i64 3, <vscale x 4 x double> %2, <vscale x 4 x float> %1, i64 %0, i64 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @wide_ternary_xvw_ro(%arg0: i64, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i64) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <vscale x 4 x double> @wide_ternary_xvw(i64 %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i64 %3) {
+// CHECK:   %5 = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i64.nxv4f64.nxv4f32.i64.i64(i64 3, <vscale x 4 x double> %2, <vscale x 4 x float> %1, i64 %0, i64 %3)
+// CHECK:   ret <vscale x 4 x double> %5
+// CHECK: }
+llvm.func @wide_ternary_xvw(%arg0: i64, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i64) -> vector<[4]xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+  llvm.return %0 : vector<[4]xf64>
+}
+
+// Widening ternary with a float (f-register) first operand, result-less form.
+// CHECK-LABEL: define void @wide_ternary_fvw_ro(float %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i64 %3) {
+// CHECK:   call void @llvm.riscv.sf.vc.fvw.se.i64.nxv4f64.nxv4f32.f32.i64(i64 1, <vscale x 4 x double> %2, <vscale x 4 x float> %1, float %0, i64 %3)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @wide_ternary_fvw_ro(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i64) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2, %arg3) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+  llvm.return
+}
+
+// Value-returning widening fvw form: lowers to sf.vc.v.fvw.se yielding the
+// widened <vscale x 4 x double> value.  Return the intrinsic result (as the
+// sibling vvw/xvw tests do) so the ret line checks the produced value rather
+// than a pass-through argument.
+// CHECK-LABEL: define <vscale x 4 x double> @wide_ternary_fvw(float %0, <vscale x 4 x float> %1, <vscale x 4 x double> %2, i64 %3) {
+// CHECK:   %5 = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.i64.nxv4f64.nxv4f32.f32.i64(i64 1, <vscale x 4 x double> %2, <vscale x 4 x float> %1, float %0, i64 %3)
+// CHECK:   ret <vscale x 4 x double> %5
+// CHECK: }
+llvm.func @wide_ternary_fvw(%arg0: f32, %arg1: vector<[4]xf32>, %arg2: vector<[4]xf64>, %arg3: i64) -> vector<[4]xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2, %arg3) <{opcode = 1 : i64}> : (f32, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+  llvm.return %0 : vector<[4]xf64>
+}
+
+// Widening ternary with an immediate first operand: the zero-extended i5
+// constant is folded into the call as the literal `i64 1`, reusing the xvw
+// intrinsic.
+// CHECK-LABEL: define void @wide_ternary_ivw_ro(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i64 %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvw.se.i64.nxv4f64.nxv4f32.i64.i64(i64 3, <vscale x 4 x double> %1, <vscale x 4 x float> %0, i64 1, i64 %2)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @wide_ternary_ivw_ro(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf64>, %arg2: i64) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  "vcix.intrin.wide.ternary.ro"(%1, %arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> ()
+  llvm.return
+}
+
+// Value-returning widening immediate form (NOTE(review): the name says "ivv"
+// but this is the wide "ivw" variant — consider renaming for consistency with
+// wide_ternary_ivw_ro).  Return the intrinsic result, as the sibling xvw test
+// does, so the ret line checks the produced value rather than a pass-through
+// argument.
+// CHECK-LABEL: define <vscale x 4 x double> @wide_ternary_ivv(<vscale x 4 x float> %0, <vscale x 4 x double> %1, i64 %2) {
+// CHECK:   %4 = call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i64.nxv4f64.nxv4f32.i64.i64(i64 3, <vscale x 4 x double> %1, <vscale x 4 x float> %0, i64 1, i64 %2)
+// CHECK:   ret <vscale x 4 x double> %4
+// CHECK: }
+llvm.func @wide_ternary_ivv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf64>, %arg2: i64) -> vector<[4]xf64> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  %2 = "vcix.intrin.wide.ternary"(%1, %arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<[4]xf32>, vector<[4]xf64>, i64) -> vector<[4]xf64>
+  llvm.return %2 : vector<[4]xf64>
+}
+
+// Fixed-width vector variants of the binary ops: no explicit vl operand is
+// passed; the lowered calls end with the constant `i64 4` — presumably the
+// fixed vector length used as vl (TODO confirm against the lowering code).
+// CHECK-LABEL: define void @fixed_binary_vv_ro(<4 x float> %0, <4 x float> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.vv.se.i64.v4f32.v4f32.i64(i64 3, i64 30, <4 x float> %1, <4 x float> %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_binary_vv_ro(%arg0: vector<4xf32>, %arg1: vector<4xf32>) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1) <{opcode = 3 : i64, rd = 30 : i64}> : (vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_binary_vv(<4 x float> %0, <4 x float> %1) {
+// CHECK:   %3 = call <4 x float> @llvm.riscv.sf.vc.v.vv.se.v4f32.i64.v4f32.v4f32.i64(i64 3, <4 x float> %1, <4 x float> %0, i64 4)
+// CHECK:   ret <4 x float> %3
+// CHECK: }
+llvm.func @fixed_binary_vv(%arg0: vector<4xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// Scalar (x-register) operand, fixed-width vector.
+// CHECK-LABEL: define void @fixed_binary_xv_ro(i64 %0, <4 x float> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.xv.se.i64.v4f32.i64.i64(i64 3, i64 30, <4 x float> %1, i64 %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_binary_xv_ro(%arg0: i64, %arg1: vector<4xf32>) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_binary_xv(i64 %0, <4 x float> %1) {
+// CHECK:   %3 = call <4 x float> @llvm.riscv.sf.vc.v.xv.se.v4f32.i64.v4f32.i64.i64(i64 3, <4 x float> %1, i64 %0, i64 4)
+// CHECK:   ret <4 x float> %3
+// CHECK: }
+llvm.func @fixed_binary_xv(%arg0: i64, %arg1: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1) <{opcode = 3 : i64}> : (i64, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// Float (f-register) operand, fixed-width vector; opcode 1 selects fv forms.
+// CHECK-LABEL: define void @fixed_binary_fv_ro(float %0, <4 x float> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.fv.se.i64.v4f32.f32.i64(i64 1, i64 30, <4 x float> %1, float %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_binary_fv_ro(%arg0: f32, %arg1: vector<4xf32>) {
+  "vcix.intrin.binary.ro"(%arg0, %arg1) <{opcode = 1 : i64, rd = 30 : i64}> : (f32, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_binary_fv(float %0, <4 x float> %1) {
+// CHECK:   %3 = call <4 x float> @llvm.riscv.sf.vc.v.fv.se.v4f32.i64.v4f32.f32.i64(i64 1, <4 x float> %1, float %0, i64 4)
+// CHECK:   ret <4 x float> %3
+// CHECK: }
+llvm.func @fixed_binary_fv(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.binary"(%arg0, %arg1) <{opcode = 1 : i64}> : (f32, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// Immediate operand, fixed-width vector: the zero-extended i5 constant folds
+// to the literal `i64 1`, reusing the xv intrinsics.
+// CHECK-LABEL: define void @fixed_binary_iv_ro(<4 x float> %0) {
+// CHECK:   call void @llvm.riscv.sf.vc.xv.se.i64.v4f32.i64.i64(i64 3, i64 30, <4 x float> %0, i64 1, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_binary_iv_ro(%arg0: vector<4xf32>) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  "vcix.intrin.binary.ro"(%1, %arg0) <{opcode = 3 : i64, rd = 30 : i64}> : (i64, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_binary_iv(<4 x float> %0) {
+// CHECK:   %2 = call <4 x float> @llvm.riscv.sf.vc.v.xv.se.v4f32.i64.v4f32.i64.i64(i64 3, <4 x float> %0, i64 1, i64 4)
+// CHECK:   ret <4 x float> %2
+// CHECK: }
+llvm.func @fixed_binary_iv(%arg0: vector<4xf32>) -> vector<4xf32> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  %2 = "vcix.intrin.binary"(%1, %arg0) <{opcode = 3 : i64}> : (i64, vector<4xf32>) -> vector<4xf32>
+  llvm.return %2 : vector<4xf32>
+}
+
+// Fixed-width vector variants of the ternary ops; operand order is reversed
+// in the lowered call, and the trailing `i64 4` takes the place of the vl
+// operand used by the scalable tests.
+// CHECK-LABEL: define void @fixed_ternary_vvv_ro(<4 x float> %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.vvv.se.i64.v4f32.v4f32.v4f32.i64(i64 3, <4 x float> %2, <4 x float> %1, <4 x float> %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_ternary_vvv_ro(%arg0: vector<4xf32>, %arg1: vector<4xf32>, %arg2: vector<4xf32>) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_ternary_vvv(<4 x float> %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   %4 = call <4 x float> @llvm.riscv.sf.vc.v.vvv.se.v4f32.i64.v4f32.v4f32.v4f32.i64(i64 3, <4 x float> %2, <4 x float> %1, <4 x float> %0, i64 4)
+// CHECK:   ret <4 x float> %4
+// CHECK: }
+llvm.func @fixed_ternary_vvv(%arg0: vector<4xf32>, %arg1: vector<4xf32>, %arg2: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// Scalar (x-register) first operand.
+// CHECK-LABEL: define void @fixed_ternary_xvv_ro(i64 %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvv.se.i64.v4f32.v4f32.i64.i64(i64 3, <4 x float> %2, <4 x float> %1, i64 %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_ternary_xvv_ro(%arg0: i64, %arg1: vector<4xf32>, %arg2: vector<4xf32>) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_ternary_xvv(i64 %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   %4 = call <4 x float> @llvm.riscv.sf.vc.v.xvv.se.v4f32.i64.v4f32.v4f32.i64.i64(i64 3, <4 x float> %2, <4 x float> %1, i64 %0, i64 4)
+// CHECK:   ret <4 x float> %4
+// CHECK: }
+llvm.func @fixed_ternary_xvv(%arg0: i64, %arg1: vector<4xf32>, %arg2: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// Float (f-register) first operand; opcode 1 selects the fvv forms.
+// CHECK-LABEL: define void @fixed_ternary_fvv_ro(float %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.fvv.se.i64.v4f32.v4f32.f32.i64(i64 1, <4 x float> %2, <4 x float> %1, float %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_ternary_fvv_ro(%arg0: f32, %arg1: vector<4xf32>, %arg2: vector<4xf32>) {
+  "vcix.intrin.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_ternary_fvv(float %0, <4 x float> %1, <4 x float> %2) {
+// CHECK:   %4 = call <4 x float> @llvm.riscv.sf.vc.v.fvv.se.v4f32.i64.v4f32.v4f32.f32.i64(i64 1, <4 x float> %2, <4 x float> %1, float %0, i64 4)
+// CHECK:   ret <4 x float> %4
+// CHECK: }
+llvm.func @fixed_ternary_fvv(%arg0: f32, %arg1: vector<4xf32>, %arg2: vector<4xf32>) -> vector<4xf32> {
+  %0 = "vcix.intrin.ternary"(%arg0, %arg1, %arg2) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %0 : vector<4xf32>
+}
+
+// Immediate first operand: the zero-extended i5 constant folds to the literal
+// `i64 1`, reusing the xvv intrinsics.
+// CHECK-LABEL: define void @fixed_ternary_ivv_ro(<4 x float> %0, <4 x float> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvv.se.i64.v4f32.v4f32.i64.i64(i64 3, <4 x float> %1, <4 x float> %0, i64 1, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_ternary_ivv_ro(%arg0: vector<4xf32>, %arg1: vector<4xf32>) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  "vcix.intrin.ternary.ro"(%1, %arg0, %arg1) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x float> @fixed_ternary_ivv(<4 x float> %0, <4 x float> %1) {
+// CHECK:   %3 = call <4 x float> @llvm.riscv.sf.vc.v.xvv.se.v4f32.i64.v4f32.v4f32.i64.i64(i64 3, <4 x float> %1, <4 x float> %0, i64 1, i64 4)
+// CHECK:   ret <4 x float> %3
+// CHECK: }
+llvm.func @fixed_ternary_ivv(%arg0: vector<4xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  %2 = "vcix.intrin.ternary"(%1, %arg0, %arg1) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+  llvm.return %2 : vector<4xf32>
+}
+
+// Fixed-width widening ternary ops: f32 inputs with an f64 third
+// operand/result, lowering to the sf.vc.*vw.se intrinsic family with the
+// trailing `i64 4` in place of the scalable tests' vl operand.
+// CHECK-LABEL: define void @fixed_wide_ternary_vvw_ro(<4 x float> %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.vvw.se.i64.v4f64.v4f32.v4f32.i64(i64 3, <4 x double> %2, <4 x float> %1, <4 x float> %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_wide_ternary_vvw_ro(%arg0: vector<4xf32>, %arg1: vector<4xf32>, %arg2: vector<4xf64>) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x double> @fixed_wide_ternary_vvw(<4 x float> %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   %4 = call <4 x double> @llvm.riscv.sf.vc.v.vvw.se.v4f64.i64.v4f64.v4f32.v4f32.i64(i64 3, <4 x double> %2, <4 x float> %1, <4 x float> %0, i64 4)
+// CHECK:   ret <4 x double> %4
+// CHECK: }
+llvm.func @fixed_wide_ternary_vvw(%arg0: vector<4xf32>, %arg1: vector<4xf32>, %arg2: vector<4xf64>) -> vector<4xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+  llvm.return %0 : vector<4xf64>
+}
+
+// Scalar (x-register) first operand.
+// CHECK-LABEL: define void @fixed_wide_ternary_xvw_ro(i64 %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvw.se.i64.v4f64.v4f32.i64.i64(i64 3, <4 x double> %2, <4 x float> %1, i64 %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_wide_ternary_xvw_ro(%arg0: i64, %arg1: vector<4xf32>, %arg2: vector<4xf64>) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> ()
+  llvm.return
+}
+
+// CHECK-LABEL: define <4 x double> @fixed_wide_ternary_xvw(i64 %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   %4 = call <4 x double> @llvm.riscv.sf.vc.v.xvw.se.v4f64.i64.v4f64.v4f32.i64.i64(i64 3, <4 x double> %2, <4 x float> %1, i64 %0, i64 4)
+// CHECK:   ret <4 x double> %4
+// CHECK: }
+llvm.func @fixed_wide_ternary_xvw(%arg0: i64, %arg1: vector<4xf32>, %arg2: vector<4xf64>) -> vector<4xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+  llvm.return %0 : vector<4xf64>
+}
+
+// Float (f-register) first operand, result-less form; opcode 1 selects fvw.
+// CHECK-LABEL: define void @fixed_wide_ternary_fvw_ro(float %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   call void @llvm.riscv.sf.vc.fvw.se.i64.v4f64.v4f32.f32.i64(i64 1, <4 x double> %2, <4 x float> %1, float %0, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_wide_ternary_fvw_ro(%arg0: f32, %arg1: vector<4xf32>, %arg2: vector<4xf64>) {
+  "vcix.intrin.wide.ternary.ro"(%arg0, %arg1, %arg2) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf64>) -> ()
+  llvm.return
+}
+
+// Value-returning fixed-width widening fvw form: lowers to sf.vc.v.fvw.se
+// yielding <4 x double>.  Return the intrinsic result (as the sibling
+// vvw/xvw tests do) so the ret line checks the produced value rather than a
+// pass-through argument.
+// CHECK-LABEL: define <4 x double> @fixed_wide_ternary_fvw(float %0, <4 x float> %1, <4 x double> %2) {
+// CHECK:   %4 = call <4 x double> @llvm.riscv.sf.vc.v.fvw.se.v4f64.i64.v4f64.v4f32.f32.i64(i64 1, <4 x double> %2, <4 x float> %1, float %0, i64 4)
+// CHECK:   ret <4 x double> %4
+// CHECK: }
+llvm.func @fixed_wide_ternary_fvw(%arg0: f32, %arg1: vector<4xf32>, %arg2: vector<4xf64>) -> vector<4xf64> {
+  %0 = "vcix.intrin.wide.ternary"(%arg0, %arg1, %arg2) <{opcode = 1 : i64}> : (f32, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+  llvm.return %0 : vector<4xf64>
+}
+
+// Immediate first operand, fixed-width widening form: the zero-extended i5
+// constant folds to the literal `i64 1`, reusing the xvw intrinsic.
+// CHECK-LABEL: define void @fixed_wide_ternary_ivw_ro(<4 x float> %0, <4 x double> %1) {
+// CHECK:   call void @llvm.riscv.sf.vc.xvw.se.i64.v4f64.v4f32.i64.i64(i64 3, <4 x double> %1, <4 x float> %0, i64 1, i64 4)
+// CHECK:   ret void
+// CHECK: }
+llvm.func @fixed_wide_ternary_ivw_ro(%arg0: vector<4xf32>, %arg1: vector<4xf64>) {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  "vcix.intrin.wide.ternary.ro"(%1, %arg0, %arg1) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> ()
+  llvm.return
+}
+
+// Value-returning fixed-width widening immediate form (NOTE(review): the name
+// says "ivv" but this is the wide "ivw" variant — consider renaming for
+// consistency with fixed_wide_ternary_ivw_ro).  Return the intrinsic result,
+// as the sibling xvw test does, so the ret line checks the produced value
+// rather than a pass-through argument.
+// CHECK-LABEL: define <4 x double> @fixed_wide_ternary_ivv(<4 x float> %0, <4 x double> %1) {
+// CHECK:   %3 = call <4 x double> @llvm.riscv.sf.vc.v.xvw.se.v4f64.i64.v4f64.v4f32.i64.i64(i64 3, <4 x double> %1, <4 x float> %0, i64 1, i64 4)
+// CHECK:   ret <4 x double> %3
+// CHECK: }
+llvm.func @fixed_wide_ternary_ivv(%arg0: vector<4xf32>, %arg1: vector<4xf64>) -> vector<4xf64> {
+  %0 = llvm.mlir.constant(1 : i5) : i5
+  %1 = llvm.zext %0 : i5 to i64
+  %2 = "vcix.intrin.wide.ternary"(%1, %arg0, %arg1) <{opcode = 3 : i64}> : (i64, vector<4xf32>, vector<4xf64>) -> vector<4xf64>
+  llvm.return %2 : vector<4xf64>
+}



More information about the Mlir-commits mailing list