[Mlir-commits] [mlir] 9f6c005 - [MLIR][VCIX] Support VCIX intrinsics in LLVMIR dialect (#75875)
llvmlistbot at llvm.org
Wed Feb 7 12:23:32 PST 2024
Author: Kolya Panchenko
Date: 2024-02-07T15:23:28-05:00
New Revision: 9f6c00565a82fc375d415804d54da1113f719b17
URL: https://github.com/llvm/llvm-project/commit/9f6c00565a82fc375d415804d54da1113f719b17
DIFF: https://github.com/llvm/llvm-project/commit/9f6c00565a82fc375d415804d54da1113f719b17.diff
LOG: [MLIR][VCIX] Support VCIX intrinsics in LLVMIR dialect (#75875)
This changeset extends the LLVMIR dialect intrinsics with VCIX intrinsics.
The VCIX intrinsics allow MLIR users to interact with RISC-V
coprocessors that are compatible with the `XSfvcp` extension.
Source:
https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
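As a quick illustration, the two new ops look as follows at the LLVM dialect level (a minimal sketch adapted from the vcix-rv64.mlir test added below; the function names @sketch_sv and @sketch_iv are illustrative only). Translating with `mlir-translate --mlir-to-llvmir` maps them to the `llvm.riscv.sf.vc.v.*.se` intrinsics:

llvm.func @sketch_sv(%vs2: vector<[4]xf32>, %fs1: f32, %vl: i64) -> vector<[4]xf32> {
  // Scalar second operand on a scalable vector; lowers to @llvm.riscv.sf.vc.v.fv.se.* with an i64 opcode and vl.
  %0 = "vcix.v.sv"(%vs2, %fs1, %vl) <{opcode = 1 : i64}> : (vector<[4]xf32>, f32, i64) -> vector<[4]xf32>
  llvm.return %0 : vector<[4]xf32>
}

llvm.func @sketch_iv(%vs2: vector<4xf32>) -> vector<4xf32> {
  // Immediate second operand on a fixed vector; vl is omitted and derived from the vector shape.
  %0 = "vcix.v.iv"(%vs2) <{opcode = 3 : i64, imm = 5 : i64}> : (vector<4xf32>) -> vector<4xf32>
  llvm.return %0 : vector<4xf32>
}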
Added:
mlir/include/mlir/Dialect/LLVMIR/VCIXDialect.h
mlir/include/mlir/Dialect/LLVMIR/VCIXOps.td
mlir/include/mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h
mlir/lib/Dialect/LLVMIR/IR/VCIXDialect.cpp
mlir/lib/Target/LLVMIR/Dialect/VCIX/CMakeLists.txt
mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp
mlir/test/Conversion/MathToVCIX/math-to-vcix.mlir
mlir/test/Target/LLVMIR/vcix-rv32.mlir
mlir/test/Target/LLVMIR/vcix-rv64.mlir
mlir/test/lib/Conversion/MathToVCIX/CMakeLists.txt
mlir/test/lib/Conversion/MathToVCIX/TestMathToVCIXConversion.cpp
Modified:
mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt
mlir/include/mlir/Target/LLVMIR/Dialect/All.h
mlir/lib/Dialect/LLVMIR/CMakeLists.txt
mlir/lib/Target/LLVMIR/CMakeLists.txt
mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
mlir/test/lib/Conversion/CMakeLists.txt
mlir/tools/mlir-opt/CMakeLists.txt
mlir/tools/mlir-opt/mlir-opt.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt b/mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt
index 8e41fcc05a161e..862abf00d03450 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt
@@ -71,3 +71,11 @@ mlir_tablegen(ROCDLConversions.inc -gen-llvmir-conversions)
mlir_tablegen(ROCDLOpsAttributes.h.inc -gen-attrdef-decls -attrdefs-dialect=rocdl)
mlir_tablegen(ROCDLOpsAttributes.cpp.inc -gen-attrdef-defs -attrdefs-dialect=rocdl)
add_public_tablegen_target(MLIRROCDLConversionsIncGen)
+
+add_mlir_dialect(VCIXOps vcix)
+add_mlir_doc(VCIXOps VCIXDialect Dialects/ -gen-dialect-doc -dialect=vcix)
+set(LLVM_TARGET_DEFINITIONS VCIXOps.td)
+mlir_tablegen(VCIXConversions.inc -gen-llvmir-conversions)
+mlir_tablegen(VCIXOpsAttributes.h.inc -gen-attrdef-decls -attrdefs-dialect=vcix)
+mlir_tablegen(VCIXOpsAttributes.cpp.inc -gen-attrdef-defs -attrdefs-dialect=vcix)
+add_public_tablegen_target(MLIRVCIXConversionsIncGen)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/VCIXDialect.h b/mlir/include/mlir/Dialect/LLVMIR/VCIXDialect.h
new file mode 100644
index 00000000000000..3c911d6bc8677b
--- /dev/null
+++ b/mlir/include/mlir/Dialect/LLVMIR/VCIXDialect.h
@@ -0,0 +1,40 @@
+//===- VCIXDialect.h - MLIR VCIX IR dialect -------------------*- C++ ---*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The file defines the basic operations for the VCIX dialect.
+//
+// The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+// to extend application processors with custom coprocessors and
+// variable-latency arithmetic units. The interface offers throughput comparable
+// to that of standard RISC-V vector instructions. To accelerate performance,
+// system designers may use VCIX as a low-latency, high-throughput interface to
+// a coprocessor.
+//
+// https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_LLVMIR_VCIXDIALECT_H_
+#define MLIR_DIALECT_LLVMIR_VCIXDIALECT_H_
+
+#include "mlir/Bytecode/BytecodeOpInterface.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/IR/Dialect.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
+
+///// Ops /////
+#define GET_ATTRDEF_CLASSES
+#include "mlir/Dialect/LLVMIR/VCIXOpsAttributes.h.inc"
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/LLVMIR/VCIXOps.h.inc"
+
+#include "mlir/Dialect/LLVMIR/VCIXOpsDialect.h.inc"
+
+#endif /* MLIR_DIALECT_LLVMIR_VCIXDIALECT_H_ */
diff --git a/mlir/include/mlir/Dialect/LLVMIR/VCIXOps.td b/mlir/include/mlir/Dialect/LLVMIR/VCIXOps.td
new file mode 100644
index 00000000000000..25c1d027768af2
--- /dev/null
+++ b/mlir/include/mlir/Dialect/LLVMIR/VCIXOps.td
@@ -0,0 +1,132 @@
+//===-- VCIX.td - VCIX dialect operation definitions *- tablegen -*--------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// The file defines the basic operations for the VCIX dialect.
+//
+// The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+// to extend application processors with custom coprocessors and
+// variable-latency arithmetic units. The interface offers throughput comparable
+// to that of standard RISC-V vector instructions. To accelerate performance,
+// system designers may use VCIX as a low-latency, high-throughput interface to
+// a coprocessor.
+//
+// https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef VCIXIR_OPS
+
+include "mlir/IR/OpBase.td"
+include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
+
+//===----------------------------------------------------------------------===//
+// VCIX dialect definition.
+//===----------------------------------------------------------------------===//
+
+def VCIX_Dialect : Dialect {
+ let name = "vcix";
+ let cppNamespace = "::mlir::vcix";
+ let dependentDialects = ["LLVM::LLVMDialect"];
+ let description = [{
+ The SiFive Vector Coprocessor Interface (VCIX) provides a flexible mechanism
+ to extend application processors with custom coprocessors and
+ variable-latency arithmetic units. The interface offers throughput comparable
+ to that of standard RISC-V vector instructions. To accelerate performance,
+ system designers may use VCIX as a low-latency, high-throughput interface to
+ a coprocessor.
+
+ https://www.sifive.com/document-file/sifive-vector-coprocessor-interface-vcix-software
+ }];
+}
+
+// Special case for the intrinsic form, where an integer attribute is
+// zero-extended to i32 or i64 depending on the XLEN of the target.
+def VCIX_VectorOrScalar
+ : AnyTypeOf<[LLVM_AnyVector, I<64>, I<32>, F<16>, F<32>, F<64>]>;
+def VCIX_OpcodeAttr : AnyAttrOf<[I32Attr, I64Attr]>;
+def VCIX_Register : AnyTypeOf<[I32, I64]>;
+def VCIX_ImmAttr : AnyAttrOf<[I32Attr, I64Attr]>;
+def VCIX_VL : AnyTypeOf<[I<64>, I<32>]>;
+
+class VCIX_Op<string mnemonic, list<Trait> traits = []>
+ : LLVM_OpBase<VCIX_Dialect, mnemonic, traits> {
+}
+
+def VCIX_BinaryImmOp : VCIX_Op<"v.iv">,
+ Results<(outs LLVM_AnyVector: $res)>,
+ Arguments<(ins VCIX_OpcodeAttr: $opcode,
+ LLVM_AnyVector: $vs2,
+ VCIX_ImmAttr: $imm,
+ Optional<VCIX_VL>: $vl)> {
+ let summary = "Binary VCIX operation with an immediate second operand";
+ let description = [{
+ Binary VCIX operation with an immediate second operand.
+
+ Corresponds to:
+ |Mnemonic|funct6|vm|rs2|rs1|funct3|rd|Destination|Sources|
+ |--|--|--|--|--|--|--|--|--|
+ |sf.vc.v.iv|0010--|0|vs2|simm|011|vd|vector vd|simm[4:0], vector vs2|
+ }];
+
+ string llvmBuilder = [{
+ llvm::Type *xlen = getXlenType($opcode, moduleTranslation);
+ llvm::Value *opcodeConst = mlir::LLVM::detail::getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+ llvm::Value *immConst = mlir::LLVM::detail::getLLVMConstant(
+ xlen, $imm, $_location, moduleTranslation);
+ VectorType vt = op.getResult().getType().cast<VectorType>();
+ llvm::Value *vl =
+ createVL(builder, $vl, vt, xlen, $_location, moduleTranslation);
+ $res = createIntrinsicCall(
+ builder, llvm::Intrinsic::riscv_sf_vc_v_iv_se,
+ {opcodeConst, $vs2, immConst, vl},
+ {$_resultType, xlen, $vs2->getType(), xlen, xlen});
+ }];
+}
+
+def VCIX_BinaryOp : VCIX_Op<"v.sv">,
+ Results<(outs LLVM_AnyVector: $res)>,
+ Arguments<(ins VCIX_OpcodeAttr: $opcode,
+ LLVM_AnyVector: $vs2,
+ VCIX_VectorOrScalar: $op,
+ Optional<VCIX_VL>: $vl)> {
+ let summary = "Binary VCIX operation";
+ let description = [{
+ Binary VCIX operation with an integer scalar, floating-point scalar, or
+ vector second operand.
+
+ Corresponds to:
+ |Mnemonic|funct6|vm|rs2|rs1|funct3|rd|Destination| Sources|
+ |--|--|--|--|--|--|--|--|--|
+ |sf.vc.v.vv|0010--|0|vs2|vs1|000|vd|vector vd|vector vs1, vector vs2|
+ |sf.vc.v.xv|0010--|0|vs2|xs1|100|vd|vector vd|scalar xs1, vector vs2|
+ |sf.vc.v.fv|0010--|0|vs2|fs1|101|vd|vector vd|scalar fs1, vector vs2|
+ }];
+
+ string llvmBuilder = [{
+ llvm::Type *xlen = getXlenType($opcode, moduleTranslation);
+ llvm::Value *opcodeConst = mlir::LLVM::detail::getLLVMConstant(
+ xlen, $opcode, $_location, moduleTranslation);
+ llvm::Intrinsic::ID id;
+ llvm::Type *opType = $op->getType();
+ if (opType->isVectorTy()) {
+ id = llvm::Intrinsic::riscv_sf_vc_v_vv_se;
+ } else if (opType->isIntegerTy()) {
+ id = llvm::Intrinsic::riscv_sf_vc_v_xv_se;
+ } else {
+ id = llvm::Intrinsic::riscv_sf_vc_v_fv_se;
+ }
+ VectorType vt = op.getResult().getType().cast<VectorType>();
+ llvm::Value *vl =
+ createVL(builder, $vl, vt, xlen, $_location, moduleTranslation);
+ $res = createIntrinsicCall(
+ builder, id, {opcodeConst, $vs2, $op, vl},
+ {$_resultType, xlen, $vs2->getType(), $op->getType(), xlen});
+ }];
+}
+
+#endif // VCIXIR_OPS
diff --git a/mlir/include/mlir/Target/LLVMIR/Dialect/All.h b/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
index 0b37e23e45118b..de9d5872cc4546 100644
--- a/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
+++ b/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
@@ -28,6 +28,7 @@
#include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/SPIRV/SPIRVToLLVMIRTranslation.h"
+#include "mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/X86Vector/X86VectorToLLVMIRTranslation.h"
namespace mlir {
@@ -48,6 +49,7 @@ static inline void registerAllToLLVMIRTranslations(DialectRegistry &registry) {
registerOpenMPDialectTranslation(registry);
registerROCDLDialectTranslation(registry);
registerSPIRVDialectTranslation(registry);
+ registerVCIXDialectTranslation(registry);
registerX86VectorDialectTranslation(registry);
// Extension required for translating GPU offloading Ops.
diff --git a/mlir/include/mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h b/mlir/include/mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h
new file mode 100644
index 00000000000000..7863597c21b144
--- /dev/null
+++ b/mlir/include/mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h
@@ -0,0 +1,31 @@
+//===- VCIXToLLVMIRTranslation.h - VCIX to LLVM IR ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides registration calls for VCIX dialect to LLVM IR translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_TARGET_LLVMIR_DIALECT_VCIX_VCIXTOLLVMIRTRANSLATION_H
+#define MLIR_TARGET_LLVMIR_DIALECT_VCIX_VCIXTOLLVMIRTRANSLATION_H
+
+namespace mlir {
+
+class DialectRegistry;
+class MLIRContext;
+
+/// Register the VCIX dialect and the translation from it to the LLVM IR in the
+/// given registry.
+void registerVCIXDialectTranslation(DialectRegistry &registry);
+
+/// Register the VCIX dialect and the translation from it in the registry
+/// associated with the given context.
+void registerVCIXDialectTranslation(MLIRContext &context);
+
+} // namespace mlir
+
+#endif // MLIR_TARGET_LLVMIR_DIALECT_VCIX_VCIXTOLLVMIRTRANSLATION_H
diff --git a/mlir/lib/Dialect/LLVMIR/CMakeLists.txt b/mlir/lib/Dialect/LLVMIR/CMakeLists.txt
index b00259677697a5..392065b859ee54 100644
--- a/mlir/lib/Dialect/LLVMIR/CMakeLists.txt
+++ b/mlir/lib/Dialect/LLVMIR/CMakeLists.txt
@@ -85,3 +85,25 @@ add_mlir_dialect_library(MLIRROCDLDialect
MLIRLLVMDialect
MLIRSideEffectInterfaces
)
+
+add_mlir_dialect_library(MLIRVCIXDialect
+ IR/VCIXDialect.cpp
+
+ ADDITIONAL_HEADER_DIRS
+ ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/LLVMIR
+
+ DEPENDS
+ MLIRGPUCompilationAttrInterfacesIncGen
+ MLIRVCIXOpsIncGen
+ MLIRVCIXConversionsIncGen
+ intrinsics_gen
+
+ LINK_COMPONENTS
+ AsmParser
+ Core
+
+ LINK_LIBS PUBLIC
+ MLIRIR
+ MLIRLLVMDialect
+ MLIRSideEffectInterfaces
+ )
diff --git a/mlir/lib/Dialect/LLVMIR/IR/VCIXDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/VCIXDialect.cpp
new file mode 100644
index 00000000000000..bd9d3528ceb74a
--- /dev/null
+++ b/mlir/lib/Dialect/LLVMIR/IR/VCIXDialect.cpp
@@ -0,0 +1,54 @@
+//===- VCIXDialect.cpp - MLIR VCIX ops implementation ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the VCIX dialect and its operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/LLVMIR/VCIXDialect.h"
+
+#include "mlir/Dialect/GPU/IR/CompilationInterfaces.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/DialectImplementation.h"
+#include "mlir/IR/MLIRContext.h"
+#include "mlir/IR/Operation.h"
+#include "llvm/ADT/TypeSwitch.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/SourceMgr.h"
+
+using namespace mlir;
+using namespace vcix;
+
+#include "mlir/Dialect/LLVMIR/VCIXOpsDialect.cpp.inc"
+
+//===----------------------------------------------------------------------===//
+// VCIXDialect initialization, type parsing, and registration.
+//===----------------------------------------------------------------------===//
+
+void VCIXDialect::initialize() {
+ addOperations<
+#define GET_OP_LIST
+#include "mlir/Dialect/LLVMIR/VCIXOps.cpp.inc"
+ >();
+
+ addAttributes<
+#define GET_ATTRDEF_LIST
+#include "mlir/Dialect/LLVMIR/VCIXOpsAttributes.cpp.inc"
+ >();
+}
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/LLVMIR/VCIXOps.cpp.inc"
+
+#define GET_ATTRDEF_CLASSES
+#include "mlir/Dialect/LLVMIR/VCIXOpsAttributes.cpp.inc"
diff --git a/mlir/lib/Target/LLVMIR/CMakeLists.txt b/mlir/lib/Target/LLVMIR/CMakeLists.txt
index 94280a2ec9012b..93032c3ce10387 100644
--- a/mlir/lib/Target/LLVMIR/CMakeLists.txt
+++ b/mlir/lib/Target/LLVMIR/CMakeLists.txt
@@ -59,6 +59,7 @@ add_mlir_translation_library(MLIRToLLVMIRTranslationRegistration
MLIROpenMPToLLVMIRTranslation
MLIRROCDLToLLVMIRTranslation
MLIRSPIRVToLLVMIRTranslation
+ MLIRVCIXToLLVMIRTranslation
)
add_mlir_translation_library(MLIRTargetLLVMIRImport
diff --git a/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt b/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
index c9d916d8a5d82d..a88e8b1fd8338e 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
+++ b/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
@@ -10,4 +10,5 @@ add_subdirectory(OpenACC)
add_subdirectory(OpenMP)
add_subdirectory(ROCDL)
add_subdirectory(SPIRV)
+add_subdirectory(VCIX)
add_subdirectory(X86Vector)
diff --git a/mlir/lib/Target/LLVMIR/Dialect/VCIX/CMakeLists.txt b/mlir/lib/Target/LLVMIR/Dialect/VCIX/CMakeLists.txt
new file mode 100644
index 00000000000000..d2622af16d9383
--- /dev/null
+++ b/mlir/lib/Target/LLVMIR/Dialect/VCIX/CMakeLists.txt
@@ -0,0 +1,16 @@
+add_mlir_translation_library(MLIRVCIXToLLVMIRTranslation
+ VCIXToLLVMIRTranslation.cpp
+
+ DEPENDS
+ MLIRVCIXConversionsIncGen
+
+ LINK_COMPONENTS
+ Core
+
+ LINK_LIBS PUBLIC
+ MLIRIR
+ MLIRLLVMDialect
+ MLIRVCIXDialect
+ MLIRSupport
+ MLIRTargetLLVMIRExport
+ )
diff --git a/mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp
new file mode 100644
index 00000000000000..8212725b5a58be
--- /dev/null
+++ b/mlir/lib/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.cpp
@@ -0,0 +1,88 @@
+//===- VCIXToLLVMIRTranslation.cpp - Translate VCIX to LLVM IR ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a translation between the MLIR VCIX dialect and
+// LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Target/LLVMIR/Dialect/VCIX/VCIXToLLVMIRTranslation.h"
+#include "mlir/Dialect/LLVMIR/VCIXDialect.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/Operation.h"
+#include "mlir/Target/LLVMIR/ModuleTranslation.h"
+
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace mlir;
+using namespace mlir::LLVM;
+using mlir::LLVM::detail::createIntrinsicCall;
+
+/// Infer the XLen type from the opcode's type. This is done to avoid passing
+/// target options around.
+static llvm::Type *getXlenType(Attribute opcodeAttr,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ auto intAttr = opcodeAttr.cast<IntegerAttr>();
+ unsigned xlenWidth = intAttr.getType().cast<IntegerType>().getWidth();
+ return llvm::Type::getIntNTy(moduleTranslation.getLLVMContext(), xlenWidth);
+}
+
+/// Return the VL for a VCIX intrinsic. If vl was previously set, return it;
+/// otherwise construct a constant using the fixed vector type.
+static llvm::Value *createVL(llvm::IRBuilderBase &builder, llvm::Value *vl,
+ VectorType vtype, llvm::Type *xlen, Location loc,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ if (vl) {
+ assert(vtype.isScalable() &&
+ "vl parameter must be set for scalable vectors");
+ return builder.CreateZExtOrTrunc(vl, xlen);
+ }
+
+ assert(vtype.getRank() == 1 && "Only 1-d fixed vectors are supported");
+ return mlir::LLVM::detail::getLLVMConstant(
+ xlen,
+ IntegerAttr::get(IntegerType::get(&moduleTranslation.getContext(), 64),
+ vtype.getShape()[0]),
+ loc, moduleTranslation);
+}
+
+namespace {
+/// Implementation of the dialect interface that converts operations belonging
+/// to the VCIX dialect to LLVM IR.
+class VCIXDialectLLVMIRTranslationInterface
+ : public LLVMTranslationDialectInterface {
+public:
+ using LLVMTranslationDialectInterface::LLVMTranslationDialectInterface;
+
+ /// Translates the given operation to LLVM IR using the provided IR builder
+ /// and saving the state in `moduleTranslation`.
+ LogicalResult
+ convertOperation(Operation *op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) const final {
+ Operation &opInst = *op;
+#include "mlir/Dialect/LLVMIR/VCIXConversions.inc"
+
+ return failure();
+ }
+};
+} // namespace
+
+void mlir::registerVCIXDialectTranslation(DialectRegistry &registry) {
+ registry.insert<vcix::VCIXDialect>();
+ registry.addExtension(+[](MLIRContext *ctx, vcix::VCIXDialect *dialect) {
+ dialect->addInterfaces<VCIXDialectLLVMIRTranslationInterface>();
+ });
+}
+
+void mlir::registerVCIXDialectTranslation(MLIRContext &context) {
+ DialectRegistry registry;
+ registerVCIXDialectTranslation(registry);
+ context.appendDialectRegistry(registry);
+}
diff --git a/mlir/test/Conversion/MathToVCIX/math-to-vcix.mlir b/mlir/test/Conversion/MathToVCIX/math-to-vcix.mlir
new file mode 100644
index 00000000000000..3563b2c1cf810a
--- /dev/null
+++ b/mlir/test/Conversion/MathToVCIX/math-to-vcix.mlir
@@ -0,0 +1,193 @@
+// RUN: mlir-opt --split-input-file --verify-diagnostics --test-math-to-vcix %s | FileCheck %s
+
+// CHECK-LABEL: func.func @cos(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[8]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[8]xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 9 : i64
+// CHECK: %[[VAL_3:.*]] = "vcix.v.iv"(%[[VAL_0]], %[[VAL_2]]) <{imm = 0 : i32, opcode = 0 : i64}> : (vector<[8]xf32>, i64) -> vector<[8]xf32>
+// CHECK: return %[[VAL_3]] : vector<[8]xf32>
+// CHECK: }
+func.func @cos(%a: vector<[8] x f32>, %rvl: i64) -> vector<[8] x f32> {
+ %res = math.cos %a : vector<[8] x f32>
+ return %res : vector<[8] x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @cos_req_legalization(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[32]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[32]xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 9 : i64
+// CHECK: %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<[32]xf32>
+// CHECK: %[[VAL_4:.*]] = vector.scalable.extract %[[VAL_0]][0] : vector<[16]xf32> from vector<[32]xf32>
+// CHECK: %[[VAL_5:.*]] = "vcix.v.iv"(%[[VAL_4]], %[[VAL_2]]) <{imm = 0 : i32, opcode = 0 : i64}> : (vector<[16]xf32>, i64) -> vector<[16]xf32>
+// CHECK: %[[VAL_6:.*]] = vector.scalable.insert %[[VAL_5]], %[[VAL_3]][0] : vector<[16]xf32> into vector<[32]xf32>
+// CHECK: %[[VAL_7:.*]] = vector.scalable.extract %[[VAL_0]][16] : vector<[16]xf32> from vector<[32]xf32>
+// CHECK: %[[VAL_8:.*]] = "vcix.v.iv"(%[[VAL_7]], %[[VAL_2]]) <{imm = 0 : i32, opcode = 0 : i64}> : (vector<[16]xf32>, i64) -> vector<[16]xf32>
+// CHECK: %[[VAL_9:.*]] = vector.scalable.insert %[[VAL_8]], %[[VAL_6]][16] : vector<[16]xf32> into vector<[32]xf32>
+// CHECK: return %[[VAL_9]] : vector<[32]xf32>
+// CHECK: }
+func.func @cos_req_legalization(%a: vector<[32] x f32>, %rvl: i64) -> vector<[32] x f32> {
+ %res = math.cos %a : vector<[32] x f32>
+ return %res : vector<[32] x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @cos_fixed(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<8xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<8xf32> {
+// CHECK: %[[VAL_2:.*]] = "vcix.v.iv"(%[[VAL_0]]) <{imm = 0 : i32, opcode = 0 : i64}> : (vector<8xf32>) -> vector<8xf32>
+// CHECK: return %[[VAL_2]] : vector<8xf32>
+// CHECK: }
+func.func @cos_fixed(%a: vector<8 x f32>, %rvl: i64) -> vector<8 x f32> {
+ %res = math.cos %a : vector<8 x f32>
+ return %res : vector<8 x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @sin(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[8]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[8]xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 9 : i64
+// CHECK: %[[VAL_3:.*]] = "vcix.v.sv"(%[[VAL_0]], %[[VAL_0]], %[[VAL_2]]) <{opcode = 0 : i64}> : (vector<[8]xf32>, vector<[8]xf32>, i64) -> vector<[8]xf32>
+// CHECK: return %[[VAL_3]] : vector<[8]xf32>
+// CHECK: }
+func.func @sin(%a: vector<[8] x f32>, %rvl: i64) -> vector<[8] x f32> {
+ %res = math.sin %a : vector<[8] x f32>
+ return %res : vector<[8] x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @sin_req_legalization(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[32]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[32]xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 9 : i64
+// CHECK: %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<[32]xf32>
+// CHECK: %[[VAL_4:.*]] = vector.scalable.extract %[[VAL_0]][0] : vector<[16]xf32> from vector<[32]xf32>
+// CHECK: %[[VAL_5:.*]] = "vcix.v.sv"(%[[VAL_4]], %[[VAL_4]], %[[VAL_2]]) <{opcode = 0 : i64}> : (vector<[16]xf32>, vector<[16]xf32>, i64) -> vector<[16]xf32>
+// CHECK: %[[VAL_6:.*]] = vector.scalable.insert %[[VAL_5]], %[[VAL_3]][0] : vector<[16]xf32> into vector<[32]xf32>
+// CHECK: %[[VAL_7:.*]] = vector.scalable.extract %[[VAL_0]][16] : vector<[16]xf32> from vector<[32]xf32>
+// CHECK: %[[VAL_8:.*]] = "vcix.v.sv"(%[[VAL_7]], %[[VAL_7]], %[[VAL_2]]) <{opcode = 0 : i64}> : (vector<[16]xf32>, vector<[16]xf32>, i64) -> vector<[16]xf32>
+// CHECK: %[[VAL_9:.*]] = vector.scalable.insert %[[VAL_8]], %[[VAL_6]][16] : vector<[16]xf32> into vector<[32]xf32>
+// CHECK: return %[[VAL_9]] : vector<[32]xf32>
+// CHECK: }
+func.func @sin_req_legalization(%a: vector<[32] x f32>, %rvl: i64) -> vector<[32] x f32> {
+ %res = math.sin %a : vector<[32] x f32>
+ return %res : vector<[32] x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @sin_fixed(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<8xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<8xf32> {
+// CHECK: %[[VAL_2:.*]] = "vcix.v.sv"(%[[VAL_0]], %[[VAL_0]]) <{opcode = 0 : i64}> : (vector<8xf32>, vector<8xf32>) -> vector<8xf32>
+// CHECK: return %[[VAL_2]] : vector<8xf32>
+// CHECK: }
+func.func @sin_fixed(%a: vector<8 x f32>, %rvl: i64) -> vector<8 x f32> {
+ %res = math.sin %a : vector<8 x f32>
+ return %res : vector<8 x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @tan(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[8]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[8]xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = arith.constant 9 : i64
+// CHECK: %[[VAL_4:.*]] = "vcix.v.sv"(%[[VAL_0]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 0 : i64}> : (vector<[8]xf32>, f32, i64) -> vector<[8]xf32>
+// CHECK: return %[[VAL_4]] : vector<[8]xf32>
+// CHECK: }
+func.func @tan(%a: vector<[8] x f32>, %rvl: i64) -> vector<[8] x f32> {
+ %res = math.tan %a : vector<[8] x f32>
+ return %res : vector<[8] x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @tan_req_legalization(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[32]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[32]xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = arith.constant 9 : i64
+// CHECK: %[[VAL_4:.*]] = arith.constant dense<0.000000e+00> : vector<[32]xf32>
+// CHECK: %[[VAL_5:.*]] = vector.scalable.extract %[[VAL_0]][0] : vector<[16]xf32> from vector<[32]xf32>
+// CHECK: %[[VAL_6:.*]] = "vcix.v.sv"(%[[VAL_5]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 0 : i64}> : (vector<[16]xf32>, f32, i64) -> vector<[16]xf32>
+// CHECK: %[[VAL_7:.*]] = vector.scalable.insert %[[VAL_6]], %[[VAL_4]][0] : vector<[16]xf32> into vector<[32]xf32>
+// CHECK: %[[VAL_8:.*]] = vector.scalable.extract %[[VAL_0]][16] : vector<[16]xf32> from vector<[32]xf32>
+// CHECK: %[[VAL_9:.*]] = "vcix.v.sv"(%[[VAL_8]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 0 : i64}> : (vector<[16]xf32>, f32, i64) -> vector<[16]xf32>
+// CHECK: %[[VAL_10:.*]] = vector.scalable.insert %[[VAL_9]], %[[VAL_7]][16] : vector<[16]xf32> into vector<[32]xf32>
+// CHECK: return %[[VAL_10]] : vector<[32]xf32>
+// CHECK: }
+func.func @tan_req_legalization(%a: vector<[32] x f32>, %rvl: i64) -> vector<[32] x f32> {
+ %res = math.tan %a : vector<[32] x f32>
+ return %res : vector<[32] x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @tan_fixed(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<8xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<8xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = "vcix.v.sv"(%[[VAL_0]], %[[VAL_2]]) <{opcode = 0 : i64}> : (vector<8xf32>, f32) -> vector<8xf32>
+// CHECK: return %[[VAL_3]] : vector<8xf32>
+// CHECK: }
+func.func @tan_fixed(%a: vector<8 x f32>, %rvl: i64) -> vector<8 x f32> {
+ %res = math.tan %a : vector<8 x f32>
+ return %res : vector<8 x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @log(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[8]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[8]xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0 : i32
+// CHECK: %[[VAL_3:.*]] = arith.constant 9 : i64
+// CHECK: %[[VAL_4:.*]] = "vcix.v.sv"(%[[VAL_0]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 0 : i64}> : (vector<[8]xf32>, i32, i64) -> vector<[8]xf32>
+// CHECK: return %[[VAL_4]] : vector<[8]xf32>
+// CHECK: }
+func.func @log(%a: vector<[8] x f32>, %rvl: i64) -> vector<[8] x f32> {
+ %res = math.log %a : vector<[8] x f32>
+ return %res : vector<[8] x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @log_req_legalization(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<[32]xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<[32]xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0 : i32
+// CHECK: %[[VAL_3:.*]] = arith.constant 9 : i64
+// CHECK: %[[VAL_4:.*]] = arith.constant dense<0.000000e+00> : vector<[32]xf32>
+// CHECK: %[[VAL_5:.*]] = vector.scalable.extract %[[VAL_0]][0] : vector<[16]xf32> from vector<[32]xf32>
+// CHECK: %[[VAL_6:.*]] = "vcix.v.sv"(%[[VAL_5]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 0 : i64}> : (vector<[16]xf32>, i32, i64) -> vector<[16]xf32>
+// CHECK: %[[VAL_7:.*]] = vector.scalable.insert %[[VAL_6]], %[[VAL_4]][0] : vector<[16]xf32> into vector<[32]xf32>
+// CHECK: %[[VAL_8:.*]] = vector.scalable.extract %[[VAL_0]][16] : vector<[16]xf32> from vector<[32]xf32>
+// CHECK: %[[VAL_9:.*]] = "vcix.v.sv"(%[[VAL_8]], %[[VAL_2]], %[[VAL_3]]) <{opcode = 0 : i64}> : (vector<[16]xf32>, i32, i64) -> vector<[16]xf32>
+// CHECK: %[[VAL_10:.*]] = vector.scalable.insert %[[VAL_9]], %[[VAL_7]][16] : vector<[16]xf32> into vector<[32]xf32>
+// CHECK: return %[[VAL_10]] : vector<[32]xf32>
+// CHECK: }
+func.func @log_req_legalization(%a: vector<[32] x f32>, %rvl: i64) -> vector<[32] x f32> {
+ %res = math.log %a : vector<[32] x f32>
+ return %res : vector<[32] x f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @log_fixed(
+// CHECK-SAME: %[[VAL_0:.*]]: vector<8xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: i64) -> vector<8xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0 : i32
+// CHECK: %[[VAL_3:.*]] = "vcix.v.sv"(%[[VAL_0]], %[[VAL_2]]) <{opcode = 0 : i64}> : (vector<8xf32>, i32) -> vector<8xf32>
+// CHECK: return %[[VAL_3]] : vector<8xf32>
+// CHECK: }
+func.func @log_fixed(%a: vector<8 x f32>, %rvl: i64) -> vector<8 x f32> {
+ %res = math.log %a : vector<8 x f32>
+ return %res : vector<8 x f32>
+}
diff --git a/mlir/test/Target/LLVMIR/vcix-rv32.mlir b/mlir/test/Target/LLVMIR/vcix-rv32.mlir
new file mode 100644
index 00000000000000..e70247235fef63
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/vcix-rv32.mlir
@@ -0,0 +1,177 @@
+// RUN: mlir-translate --split-input-file --mlir-to-llvmir %s | FileCheck %s
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_fv(<vscale x 4 x float> %0, float %1, i32 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.i32.nxv4f32.f32.i32(i32 1, <vscale x 4 x float> %0, float %1, i32 %2)
+// CHECK-NEXT: ret <vscale x 4 x float> %4
+// CHECK-NEXT: }
+llvm.func @binary_fv(%arg0: vector<[4]xf32>, %arg1: f32, %vl: i32) -> vector<[4]xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 1 : i32}> : (vector<[4]xf32>, f32, i32) -> vector<[4]xf32>
+ llvm.return %0 : vector<[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_xv(<vscale x 4 x float> %0, i32 %1, i32 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> %0, i32 %1, i32 %2)
+// CHECK-NEXT: ret <vscale x 4 x float> %4
+// CHECK-NEXT: }
+llvm.func @binary_xv(%arg0: vector<[4]xf32>, %arg1: i32, %vl: i32) -> vector<[4]xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 3 : i32}> : (vector<[4]xf32>, i32, i32) -> vector<[4]xf32>
+ llvm.return %0 : vector<[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_vv(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.i32.nxv4f32.nxv4f32.i32(i32 3, <vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2)
+// CHECK-NEXT: ret <vscale x 4 x float> %4
+// CHECK-NEXT: }
+llvm.func @binary_vv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %vl: i32) -> vector<[4]xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 3 : i32}> : (vector<[4]xf32>, vector<[4]xf32>, i32) -> vector<[4]xf32>
+ llvm.return %0 : vector<[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_iv(<vscale x 4 x float> %0, i32 %1) {
+// CHECK-NEXT: %3 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.i32.nxv4f32.i32.i32(i32 3, <vscale x 4 x float> %0, i32 5, i32 %1)
+// CHECK-NEXT: ret <vscale x 4 x float> %3
+// CHECK-NEXT: }
+llvm.func @binary_iv(%arg0: vector<[4]xf32>, %vl: i32) -> vector<[4]xf32> {
+ %0 = "vcix.v.iv"(%arg0, %vl) <{opcode = 3 : i32, imm = 5 : i32}> : (vector<[4]xf32>, i32) -> vector<[4]xf32>
+ llvm.return %0 : vector<[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x float> @binary_fixed_fv(<4 x float> %0, float %1) {
+// CHECK-NEXT: %3 = call <4 x float> @llvm.riscv.sf.vc.v.fv.se.v4f32.i32.v4f32.f32.i32(i32 1, <4 x float> %0, float %1, i32 4)
+// CHECK-NEXT: ret <4 x float> %3
+// CHECK-NEXT: }
+llvm.func @binary_fixed_fv(%arg0: vector<4xf32>, %arg1: f32) -> vector<4xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 1 : i32}> : (vector<4xf32>, f32) -> vector<4xf32>
+ llvm.return %0 : vector<4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x float> @binary_fixed_xv(<4 x float> %0, i32 %1) {
+// CHECK-NEXT: %3 = call <4 x float> @llvm.riscv.sf.vc.v.xv.se.v4f32.i32.v4f32.i32.i32(i32 3, <4 x float> %0, i32 %1, i32 4)
+// CHECK-NEXT: ret <4 x float> %3
+// CHECK-NEXT: }
+llvm.func @binary_fixed_xv(%arg0: vector<4xf32>, %arg1: i32) -> vector<4xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 3 : i32}> : (vector<4xf32>, i32) -> vector<4xf32>
+ llvm.return %0 : vector<4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x float> @binary_fixed_vv(<4 x float> %0, <4 x float> %1) {
+// CHECK-NEXT: %3 = call <4 x float> @llvm.riscv.sf.vc.v.vv.se.v4f32.i32.v4f32.v4f32.i32(i32 3, <4 x float> %0, <4 x float> %1, i32 4)
+// CHECK-NEXT: ret <4 x float> %3
+// CHECK-NEXT: }
+llvm.func @binary_fixed_vv(%arg0: vector<4xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 3 : i32}> : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+ llvm.return %0 : vector<4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x float> @binary_fixed_iv(<4 x float> %0) {
+// CHECK-NEXT: %2 = call <4 x float> @llvm.riscv.sf.vc.v.iv.se.v4f32.i32.v4f32.i32.i32(i32 3, <4 x float> %0, i32 5, i32 4)
+// CHECK-NEXT: ret <4 x float> %2
+// CHECK-NEXT: }
+llvm.func @binary_fixed_iv(%arg0: vector<4xf32>) -> vector<4xf32> {
+ %0 = "vcix.v.iv"(%arg0) <{opcode = 3 : i32, imm = 5 : i32}> : (vector<4xf32>) -> vector<4xf32>
+ llvm.return %0 : vector<4xf32>
+}
+
+// Test integer type
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x i32> @binary_i_fv(<vscale x 4 x i32> %0, float %1, i32 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> %0, float %1, i32 %2)
+// CHECK-NEXT: ret <vscale x 4 x i32> %4
+// CHECK-NEXT: }
+llvm.func @binary_i_fv(%arg0: vector<[4]xi32>, %arg1: f32, %vl: i32) -> vector<[4]xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 1 : i32}> : (vector<[4]xi32>, f32, i32) -> vector<[4]xi32>
+ llvm.return %0 : vector<[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x i32> @binary_i_xv(<vscale x 4 x i32> %0, i32 %1, i32 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> %0, i32 %1, i32 %2)
+// CHECK-NEXT: ret <vscale x 4 x i32> %4
+// CHECK-NEXT: }
+llvm.func @binary_i_xv(%arg0: vector<[4]xi32>, %arg1: i32, %vl: i32) -> vector<[4]xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 3 : i32}> : (vector<[4]xi32>, i32, i32) -> vector<[4]xi32>
+ llvm.return %0 : vector<[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x i32> @binary_i_vv(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2)
+// CHECK-NEXT: ret <vscale x 4 x i32> %4
+// CHECK-NEXT: }
+llvm.func @binary_i_vv(%arg0: vector<[4]xi32>, %arg1: vector<[4]xi32>, %vl: i32) -> vector<[4]xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 3 : i32}> : (vector<[4]xi32>, vector<[4]xi32>, i32) -> vector<[4]xi32>
+ llvm.return %0 : vector<[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x i32> @binary_i_iv(<vscale x 4 x i32> %0, i32 %1) {
+// CHECK-NEXT: %3 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> %0, i32 5, i32 %1)
+// CHECK-NEXT: ret <vscale x 4 x i32> %3
+// CHECK-NEXT: }
+llvm.func @binary_i_iv(%arg0: vector<[4]xi32>, %vl: i32) -> vector<[4]xi32> {
+ %0 = "vcix.v.iv"(%arg0, %vl) <{opcode = 3 : i32, imm = 5 : i32}> : (vector<[4]xi32>, i32) -> vector<[4]xi32>
+ llvm.return %0 : vector<[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x i32> @binary_i_fixed_fv(<4 x i32> %0, float %1) {
+// CHECK-NEXT: %3 = call <4 x i32> @llvm.riscv.sf.vc.v.fv.se.v4i32.i32.v4i32.f32.i32(i32 1, <4 x i32> %0, float %1, i32 4)
+// CHECK-NEXT: ret <4 x i32> %3
+// CHECK-NEXT: }
+llvm.func @binary_i_fixed_fv(%arg0: vector<4xi32>, %arg1: f32) -> vector<4xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 1 : i32}> : (vector<4xi32>, f32) -> vector<4xi32>
+ llvm.return %0 : vector<4xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x i32> @binary_i_fixed_xv(<4 x i32> %0, i32 %1) {
+// CHECK-NEXT: %3 = call <4 x i32> @llvm.riscv.sf.vc.v.xv.se.v4i32.i32.v4i32.i32.i32(i32 3, <4 x i32> %0, i32 %1, i32 4)
+// CHECK-NEXT: ret <4 x i32> %3
+// CHECK-NEXT: }
+llvm.func @binary_i_fixed_xv(%arg0: vector<4xi32>, %arg1: i32) -> vector<4xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 3 : i32}> : (vector<4xi32>, i32) -> vector<4xi32>
+ llvm.return %0 : vector<4xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x i32> @binary_i_fixed_vv(<4 x i32> %0, <4 x i32> %1) {
+// CHECK-NEXT: %3 = call <4 x i32> @llvm.riscv.sf.vc.v.vv.se.v4i32.i32.v4i32.v4i32.i32(i32 3, <4 x i32> %0, <4 x i32> %1, i32 4)
+// CHECK-NEXT: ret <4 x i32> %3
+// CHECK-NEXT: }
+llvm.func @binary_i_fixed_vv(%arg0: vector<4xi32>, %arg1: vector<4xi32>) -> vector<4xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 3 : i32}> : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+ llvm.return %0 : vector<4xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x i32> @binary_i_fixed_iv(<4 x i32> %0) {
+// CHECK-NEXT: %2 = call <4 x i32> @llvm.riscv.sf.vc.v.iv.se.v4i32.i32.v4i32.i32.i32(i32 3, <4 x i32> %0, i32 5, i32 4)
+// CHECK-NEXT: ret <4 x i32> %2
+// CHECK-NEXT: }
+llvm.func @binary_i_fixed_iv(%arg0: vector<4xi32>) -> vector<4xi32> {
+ %0 = "vcix.v.iv"(%arg0) <{opcode = 3 : i32, imm = 5 : i32}> : (vector<4xi32>) -> vector<4xi32>
+ llvm.return %0 : vector<4xi32>
+}
diff --git a/mlir/test/Target/LLVMIR/vcix-rv64.mlir b/mlir/test/Target/LLVMIR/vcix-rv64.mlir
new file mode 100644
index 00000000000000..47215cd44893e6
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/vcix-rv64.mlir
@@ -0,0 +1,178 @@
+// RUN: mlir-translate --mlir-to-llvmir %s | FileCheck %s
+
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_fv(<vscale x 4 x float> %0, float %1, i64 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.i64.nxv4f32.f32.i64(i64 1, <vscale x 4 x float> %0, float %1, i64 %2)
+// CHECK-NEXT: ret <vscale x 4 x float> %4
+// CHECK-NEXT: }
+llvm.func @binary_fv(%arg0: vector<[4]xf32>, %arg1: f32, %vl: i64) -> vector<[4]xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 1 : i64}> : (vector<[4]xf32>, f32, i64) -> vector<[4]xf32>
+ llvm.return %0 : vector<[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_xv(<vscale x 4 x float> %0, i64 %1, i64 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.i64.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> %0, i64 %1, i64 %2)
+// CHECK-NEXT: ret <vscale x 4 x float> %4
+// CHECK-NEXT: }
+llvm.func @binary_xv(%arg0: vector<[4]xf32>, %arg1: i64, %vl: i64) -> vector<[4]xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 3 : i64}> : (vector<[4]xf32>, i64, i64) -> vector<[4]xf32>
+ llvm.return %0 : vector<[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_vv(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.i64.nxv4f32.nxv4f32.i64(i64 3, <vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2)
+// CHECK-NEXT: ret <vscale x 4 x float> %4
+// CHECK-NEXT: }
+llvm.func @binary_vv(%arg0: vector<[4]xf32>, %arg1: vector<[4]xf32>, %vl: i64) -> vector<[4]xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 3 : i64}> : (vector<[4]xf32>, vector<[4]xf32>, i64) -> vector<[4]xf32>
+ llvm.return %0 : vector<[4]xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x float> @binary_iv(<vscale x 4 x float> %0, i64 %1) {
+// CHECK-NEXT: %3 = call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.i64.nxv4f32.i64.i64(i64 3, <vscale x 4 x float> %0, i64 5, i64 %1)
+// CHECK-NEXT: ret <vscale x 4 x float> %3
+// CHECK-NEXT: }
+llvm.func @binary_iv(%arg0: vector<[4]xf32>, %vl: i64) -> vector<[4]xf32> {
+ %0 = "vcix.v.iv"(%arg0, %vl) <{opcode = 3 : i64, imm = 5 : i64}> : (vector<[4]xf32>, i64) -> vector<[4]xf32>
+ llvm.return %0 : vector<[4]xf32>
+}
+
+// -----
+
+// CHECK: define <4 x float> @binary_fixed_fv(<4 x float> %0, float %1) {
+// CHECK-NEXT: %3 = call <4 x float> @llvm.riscv.sf.vc.v.fv.se.v4f32.i64.v4f32.f32.i64(i64 1, <4 x float> %0, float %1, i64 4)
+// CHECK-NEXT: ret <4 x float> %3
+// CHECK-NEXT: }
+llvm.func @binary_fixed_fv(%arg0: vector<4xf32>, %arg1: f32) -> vector<4xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 1 : i64}> : (vector<4xf32>, f32) -> vector<4xf32>
+ llvm.return %0 : vector<4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x float> @binary_fixed_xv(<4 x float> %0, i64 %1) {
+// CHECK-NEXT: %3 = call <4 x float> @llvm.riscv.sf.vc.v.xv.se.v4f32.i64.v4f32.i64.i64(i64 3, <4 x float> %0, i64 %1, i64 4)
+// CHECK-NEXT: ret <4 x float> %3
+// CHECK-NEXT: }
+llvm.func @binary_fixed_xv(%arg0: vector<4xf32>, %arg1: i64) -> vector<4xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 3 : i64}> : (vector<4xf32>, i64) -> vector<4xf32>
+ llvm.return %0 : vector<4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x float> @binary_fixed_vv(<4 x float> %0, <4 x float> %1) {
+// CHECK-NEXT: %3 = call <4 x float> @llvm.riscv.sf.vc.v.vv.se.v4f32.i64.v4f32.v4f32.i64(i64 3, <4 x float> %0, <4 x float> %1, i64 4)
+// CHECK-NEXT: ret <4 x float> %3
+// CHECK-NEXT: }
+llvm.func @binary_fixed_vv(%arg0: vector<4xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 3 : i64}> : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
+ llvm.return %0 : vector<4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x float> @binary_fixed_iv(<4 x float> %0) {
+// CHECK-NEXT: %2 = call <4 x float> @llvm.riscv.sf.vc.v.iv.se.v4f32.i64.v4f32.i64.i64(i64 3, <4 x float> %0, i64 5, i64 4)
+// CHECK-NEXT: ret <4 x float> %2
+// CHECK-NEXT: }
+llvm.func @binary_fixed_iv(%arg0: vector<4xf32>) -> vector<4xf32> {
+ %0 = "vcix.v.iv"(%arg0) <{opcode = 3 : i64, imm = 5 : i64}> : (vector<4xf32>) -> vector<4xf32>
+ llvm.return %0 : vector<4xf32>
+}
+
+// Test integer type
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x i32> @binary_i_fv(<vscale x 4 x i32> %0, float %1, i64 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i32> %0, float %1, i64 %2)
+// CHECK-NEXT: ret <vscale x 4 x i32> %4
+// CHECK-NEXT: }
+llvm.func @binary_i_fv(%arg0: vector<[4]xi32>, %arg1: f32, %vl: i64) -> vector<[4]xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 1 : i64}> : (vector<[4]xi32>, f32, i64) -> vector<[4]xi32>
+ llvm.return %0 : vector<[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x i32> @binary_i_xv(<vscale x 4 x i32> %0, i64 %1, i64 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> %0, i64 %1, i64 %2)
+// CHECK-NEXT: ret <vscale x 4 x i32> %4
+// CHECK-NEXT: }
+llvm.func @binary_i_xv(%arg0: vector<[4]xi32>, %arg1: i64, %vl: i64) -> vector<[4]xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 3 : i64}> : (vector<[4]xi32>, i64, i64) -> vector<[4]xi32>
+ llvm.return %0 : vector<[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x i32> @binary_i_vv(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) {
+// CHECK-NEXT: %4 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2)
+// CHECK-NEXT: ret <vscale x 4 x i32> %4
+// CHECK-NEXT: }
+llvm.func @binary_i_vv(%arg0: vector<[4]xi32>, %arg1: vector<[4]xi32>, %vl: i64) -> vector<[4]xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1, %vl) <{opcode = 3 : i64}> : (vector<[4]xi32>, vector<[4]xi32>, i64) -> vector<[4]xi32>
+ llvm.return %0 : vector<[4]xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <vscale x 4 x i32> @binary_i_iv(<vscale x 4 x i32> %0, i64 %1) {
+// CHECK-NEXT: %3 = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> %0, i64 5, i64 %1)
+// CHECK-NEXT: ret <vscale x 4 x i32> %3
+// CHECK-NEXT: }
+llvm.func @binary_i_iv(%arg0: vector<[4]xi32>, %vl: i64) -> vector<[4]xi32> {
+ %0 = "vcix.v.iv"(%arg0, %vl) <{opcode = 3 : i64, imm = 5 : i64}> : (vector<[4]xi32>, i64) -> vector<[4]xi32>
+ llvm.return %0 : vector<[4]xi32>
+}
+
+// -----
+
+// CHECK: define <4 x i32> @binary_i_fixed_fv(<4 x i32> %0, float %1) {
+// CHECK-NEXT: %3 = call <4 x i32> @llvm.riscv.sf.vc.v.fv.se.v4i32.i64.v4i32.f32.i64(i64 1, <4 x i32> %0, float %1, i64 4)
+// CHECK-NEXT: ret <4 x i32> %3
+// CHECK-NEXT: }
+llvm.func @binary_i_fixed_fv(%arg0: vector<4xi32>, %arg1: f32) -> vector<4xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 1 : i64}> : (vector<4xi32>, f32) -> vector<4xi32>
+ llvm.return %0 : vector<4xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x i32> @binary_i_fixed_xv(<4 x i32> %0, i64 %1) {
+// CHECK-NEXT: %3 = call <4 x i32> @llvm.riscv.sf.vc.v.xv.se.v4i32.i64.v4i32.i64.i64(i64 3, <4 x i32> %0, i64 %1, i64 4)
+// CHECK-NEXT: ret <4 x i32> %3
+// CHECK-NEXT: }
+llvm.func @binary_i_fixed_xv(%arg0: vector<4xi32>, %arg1: i64) -> vector<4xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 3 : i64}> : (vector<4xi32>, i64) -> vector<4xi32>
+ llvm.return %0 : vector<4xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x i32> @binary_i_fixed_vv(<4 x i32> %0, <4 x i32> %1) {
+// CHECK-NEXT: %3 = call <4 x i32> @llvm.riscv.sf.vc.v.vv.se.v4i32.i64.v4i32.v4i32.i64(i64 3, <4 x i32> %0, <4 x i32> %1, i64 4)
+// CHECK-NEXT: ret <4 x i32> %3
+// CHECK-NEXT: }
+llvm.func @binary_i_fixed_vv(%arg0: vector<4xi32>, %arg1: vector<4xi32>) -> vector<4xi32> {
+ %0 = "vcix.v.sv"(%arg0, %arg1) <{opcode = 3 : i64}> : (vector<4xi32>, vector<4xi32>) -> vector<4xi32>
+ llvm.return %0 : vector<4xi32>
+}
+
+// -----
+
+// CHECK-LABEL: define <4 x i32> @binary_i_fixed_iv(<4 x i32> %0) {
+// CHECK-NEXT: %2 = call <4 x i32> @llvm.riscv.sf.vc.v.iv.se.v4i32.i64.v4i32.i64.i64(i64 3, <4 x i32> %0, i64 5, i64 4)
+// CHECK-NEXT: ret <4 x i32> %2
+// CHECK-NEXT: }
+llvm.func @binary_i_fixed_iv(%arg0: vector<4xi32>) -> vector<4xi32> {
+ %0 = "vcix.v.iv"(%arg0) <{opcode = 3 : i64, imm = 5 : i64}> : (vector<4xi32>) -> vector<4xi32>
+ llvm.return %0 : vector<4xi32>
+}
diff --git a/mlir/test/lib/Conversion/CMakeLists.txt b/mlir/test/lib/Conversion/CMakeLists.txt
index 14df652ac7dfd0..754c9866d18e44 100644
--- a/mlir/test/lib/Conversion/CMakeLists.txt
+++ b/mlir/test/lib/Conversion/CMakeLists.txt
@@ -1,3 +1,4 @@
add_subdirectory(FuncToLLVM)
+add_subdirectory(MathToVCIX)
add_subdirectory(OneToNTypeConversion)
add_subdirectory(VectorToSPIRV)
diff --git a/mlir/test/lib/Conversion/MathToVCIX/CMakeLists.txt b/mlir/test/lib/Conversion/MathToVCIX/CMakeLists.txt
new file mode 100644
index 00000000000000..933e84722fbba8
--- /dev/null
+++ b/mlir/test/lib/Conversion/MathToVCIX/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Exclude tests from libMLIR.so
+add_mlir_library(MLIRTestMathToVCIX
+ TestMathToVCIXConversion.cpp
+
+ EXCLUDE_FROM_LIBMLIR
+
+ LINK_LIBS PUBLIC
+ MLIRArithDialect
+ MLIRFuncDialect
+ MLIRMathDialect
+ MLIRVCIXDialect
+ MLIRVectorDialect
+ MLIRPass
+ MLIRTransforms
+)
diff --git a/mlir/test/lib/Conversion/MathToVCIX/TestMathToVCIXConversion.cpp b/mlir/test/lib/Conversion/MathToVCIX/TestMathToVCIXConversion.cpp
new file mode 100644
index 00000000000000..c8bee817213d8d
--- /dev/null
+++ b/mlir/test/lib/Conversion/MathToVCIX/TestMathToVCIXConversion.cpp
@@ -0,0 +1,260 @@
+//===- TestMathToVCIXConversion.cpp - Test conversion to VCIX ops ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Arith/IR/Arith.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/LLVMIR/VCIXDialect.h"
+#include "mlir/Dialect/Math/IR/Math.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Pass/PassManager.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+namespace mlir {
+namespace {
+
+/// Return the number of extracts required to make the input VectorType \p vt
+/// legal, and also return that legal vector type.
+/// For fixed vectors nothing special is needed. Scalable vectors are legalized
+/// according to LLVM's encoding:
+/// https://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html
+static std::pair<unsigned, VectorType> legalizeVectorType(const Type &type) {
+ VectorType vt = type.cast<VectorType>();
+ // To simplify test pass, avoid multi-dimensional vectors.
+ if (!vt || vt.getRank() != 1)
+ return {0, nullptr};
+
+ if (!vt.isScalable())
+ return {1, vt};
+
+ Type eltTy = vt.getElementType();
+ unsigned sew = 0;
+ if (eltTy.isF32())
+ sew = 32;
+ else if (eltTy.isF64())
+ sew = 64;
+ else if (auto intTy = eltTy.dyn_cast<IntegerType>())
+ sew = intTy.getWidth();
+ else
+ return {0, nullptr};
+
+ unsigned eltCount = vt.getShape()[0];
+ const unsigned lmul = eltCount * sew / 64;
+
+ unsigned n = lmul > 8 ? llvm::Log2_32(lmul) - 2 : 1;
+ return {n, VectorType::get({eltCount >> (n - 1)}, eltTy, {true})};
+}
+
+/// Replace math.cos(v) operation with vcix.v.iv(v).
+struct MathCosToVCIX final : OpRewritePattern<math::CosOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(math::CosOp op,
+ PatternRewriter &rewriter) const override {
+ const Type opType = op.getOperand().getType();
+ auto [n, legalType] = legalizeVectorType(opType);
+ if (!legalType)
+ return rewriter.notifyMatchFailure(op, "cannot legalize type for RVV");
+ Location loc = op.getLoc();
+ Value vec = op.getOperand();
+ Attribute immAttr = rewriter.getI32IntegerAttr(0);
+ Attribute opcodeAttr = rewriter.getI64IntegerAttr(0);
+ Value rvl = nullptr;
+ if (legalType.isScalable())
+ // Use arbitrary runtime vector length when vector type is scalable.
+ // Proper conversion pass should take it from the IR.
+ rvl = rewriter.create<arith::ConstantOp>(loc,
+ rewriter.getI64IntegerAttr(9));
+ Value res;
+ if (n == 1) {
+ res = rewriter.create<vcix::BinaryImmOp>(loc, legalType, opcodeAttr, vec,
+ immAttr, rvl);
+ } else {
+ const unsigned eltCount = legalType.getShape()[0];
+ Type eltTy = legalType.getElementType();
+ Value zero = rewriter.create<arith::ConstantOp>(
+ loc, eltTy, rewriter.getZeroAttr(eltTy));
+ res = rewriter.create<vector::BroadcastOp>(loc, opType, zero /*dummy*/);
+ for (unsigned i = 0; i < n; ++i) {
+ Value extracted = rewriter.create<vector::ScalableExtractOp>(
+ loc, legalType, vec, i * eltCount);
+ Value v = rewriter.create<vcix::BinaryImmOp>(loc, legalType, opcodeAttr,
+ extracted, immAttr, rvl);
+ res = rewriter.create<vector::ScalableInsertOp>(loc, v, res,
+ i * eltCount);
+ }
+ }
+ rewriter.replaceOp(op, res);
+ return success();
+ }
+};
+
+// Replace math.sin(v) operation with vcix.v.sv(v, v).
+struct MathSinToVCIX final : OpRewritePattern<math::SinOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(math::SinOp op,
+ PatternRewriter &rewriter) const override {
+ const Type opType = op.getOperand().getType();
+ auto [n, legalType] = legalizeVectorType(opType);
+ if (!legalType)
+ return rewriter.notifyMatchFailure(op, "cannot legalize type for RVV");
+ Location loc = op.getLoc();
+ Value vec = op.getOperand();
+ Attribute opcodeAttr = rewriter.getI64IntegerAttr(0);
+ Value rvl = nullptr;
+ if (legalType.isScalable())
+ // Use arbitrary runtime vector length when vector type is scalable.
+ // Proper conversion pass should take it from the IR.
+ rvl = rewriter.create<arith::ConstantOp>(loc,
+ rewriter.getI64IntegerAttr(9));
+ Value res;
+ if (n == 1) {
+ res = rewriter.create<vcix::BinaryOp>(loc, legalType, opcodeAttr, vec,
+ vec, rvl);
+ } else {
+ const unsigned eltCount = legalType.getShape()[0];
+ Type eltTy = legalType.getElementType();
+ Value zero = rewriter.create<arith::ConstantOp>(
+ loc, eltTy, rewriter.getZeroAttr(eltTy));
+ res = rewriter.create<vector::BroadcastOp>(loc, opType, zero /*dummy*/);
+ for (unsigned i = 0; i < n; ++i) {
+ Value extracted = rewriter.create<vector::ScalableExtractOp>(
+ loc, legalType, vec, i * eltCount);
+ Value v = rewriter.create<vcix::BinaryOp>(loc, legalType, opcodeAttr,
+ extracted, extracted, rvl);
+ res = rewriter.create<vector::ScalableInsertOp>(loc, v, res,
+ i * eltCount);
+ }
+ }
+ rewriter.replaceOp(op, res);
+ return success();
+ }
+};
+
+// Replace math.tan(v) operation with vcix.v.sv(v, 0.0f).
+struct MathTanToVCIX final : OpRewritePattern<math::TanOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(math::TanOp op,
+ PatternRewriter &rewriter) const override {
+ const Type opType = op.getOperand().getType();
+ auto [n, legalType] = legalizeVectorType(opType);
+ if (!legalType)
+ return rewriter.notifyMatchFailure(op, "cannot legalize type for RVV");
+ Type eltTy = legalType.getElementType();
+ Location loc = op.getLoc();
+ Value vec = op.getOperand();
+ Attribute opcodeAttr = rewriter.getI64IntegerAttr(0);
+ Value zero = rewriter.create<arith::ConstantOp>(
+ loc, eltTy, rewriter.getZeroAttr(eltTy));
+ Value rvl = nullptr;
+ if (legalType.isScalable())
+ // Use arbitrary runtime vector length when vector type is scalable.
+ // Proper conversion pass should take it from the IR.
+ rvl = rewriter.create<arith::ConstantOp>(loc,
+ rewriter.getI64IntegerAttr(9));
+ Value res;
+ if (n == 1) {
+ res = rewriter.create<vcix::BinaryOp>(loc, legalType, opcodeAttr, vec,
+ zero, rvl);
+ } else {
+ const unsigned eltCount = legalType.getShape()[0];
+ res = rewriter.create<vector::BroadcastOp>(loc, opType, zero /*dummy*/);
+ for (unsigned i = 0; i < n; ++i) {
+ Value extracted = rewriter.create<vector::ScalableExtractOp>(
+ loc, legalType, vec, i * eltCount);
+ Value v = rewriter.create<vcix::BinaryOp>(loc, legalType, opcodeAttr,
+ extracted, zero, rvl);
+ res = rewriter.create<vector::ScalableInsertOp>(loc, v, res,
+ i * eltCount);
+ }
+ }
+ rewriter.replaceOp(op, res);
+ return success();
+ }
+};
+
+// Replace math.log(v) operation with vcix.v.sv(v, 0).
+struct MathLogToVCIX final : OpRewritePattern<math::LogOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(math::LogOp op,
+ PatternRewriter &rewriter) const override {
+ const Type opType = op.getOperand().getType();
+ auto [n, legalType] = legalizeVectorType(opType);
+ if (!legalType)
+ return rewriter.notifyMatchFailure(op, "cannot legalize type for RVV");
+ Location loc = op.getLoc();
+ Value vec = op.getOperand();
+ Attribute opcodeAttr = rewriter.getI64IntegerAttr(0);
+ Value rvl = nullptr;
+ Value zeroInt = rewriter.create<arith::ConstantOp>(
+ loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(0));
+ if (legalType.isScalable())
+ // Use arbitrary runtime vector length when vector type is scalable.
+ // Proper conversion pass should take it from the IR.
+ rvl = rewriter.create<arith::ConstantOp>(loc,
+ rewriter.getI64IntegerAttr(9));
+ Value res;
+ if (n == 1) {
+ res = rewriter.create<vcix::BinaryOp>(loc, legalType, opcodeAttr, vec,
+ zeroInt, rvl);
+ } else {
+ const unsigned eltCount = legalType.getShape()[0];
+ Type eltTy = legalType.getElementType();
+ Value zero = rewriter.create<arith::ConstantOp>(
+ loc, eltTy, rewriter.getZeroAttr(eltTy));
+ res = rewriter.create<vector::BroadcastOp>(loc, opType, zero /*dummy*/);
+ for (unsigned i = 0; i < n; ++i) {
+ Value extracted = rewriter.create<vector::ScalableExtractOp>(
+ loc, legalType, vec, i * eltCount);
+ Value v = rewriter.create<vcix::BinaryOp>(loc, legalType, opcodeAttr,
+ extracted, zeroInt, rvl);
+ res = rewriter.create<vector::ScalableInsertOp>(loc, v, res,
+ i * eltCount);
+ }
+ }
+ rewriter.replaceOp(op, res);
+ return success();
+ }
+};
+
+struct TestMathToVCIX
+ : PassWrapper<TestMathToVCIX, OperationPass<func::FuncOp>> {
+ MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestMathToVCIX)
+
+ StringRef getArgument() const final { return "test-math-to-vcix"; }
+
+ StringRef getDescription() const final {
+ return "Test lowering patterns that converts some vector operations to "
+ "VCIX. Since DLA can implement VCIX instructions in completely "
+ "
diff erent way, conversions of that test pass only lives here.";
+ }
+
+ void getDependentDialects(DialectRegistry &registry) const override {
+ registry.insert<arith::ArithDialect, func::FuncDialect, math::MathDialect,
+ vcix::VCIXDialect, vector::VectorDialect>();
+ }
+
+ void runOnOperation() override {
+ MLIRContext *ctx = &getContext();
+ RewritePatternSet patterns(ctx);
+ patterns.add<MathCosToVCIX, MathSinToVCIX, MathTanToVCIX, MathLogToVCIX>(
+ ctx);
+ (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
+ }
+};
+
+} // namespace
+
+namespace test {
+void registerTestMathToVCIXPass() { PassRegistration<TestMathToVCIX>(); }
+} // namespace test
+} // namespace mlir
diff --git a/mlir/tools/mlir-opt/CMakeLists.txt b/mlir/tools/mlir-opt/CMakeLists.txt
index 9ad5b32c24f9de..68aa6bad5f92c5 100644
--- a/mlir/tools/mlir-opt/CMakeLists.txt
+++ b/mlir/tools/mlir-opt/CMakeLists.txt
@@ -25,6 +25,7 @@ if(MLIR_INCLUDE_TESTS)
MLIRLinalgTestPasses
MLIRLoopLikeInterfaceTestPasses
MLIRMathTestPasses
+ MLIRTestMathToVCIX
MLIRMemRefTestPasses
MLIRMeshTest
MLIRNVGPUTestPasses
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp
index 52da9e98603c67..1b3f60b4b3abd3 100644
--- a/mlir/tools/mlir-opt/mlir-opt.cpp
+++ b/mlir/tools/mlir-opt/mlir-opt.cpp
@@ -114,6 +114,7 @@ void registerTestMakeIsolatedFromAbovePass();
void registerTestMatchReductionPass();
void registerTestMathAlgebraicSimplificationPass();
void registerTestMathPolynomialApproximationPass();
+void registerTestMathToVCIXPass();
void registerTestMemRefDependenceCheck();
void registerTestMemRefStrideCalculation();
void registerTestMeshSimplificationsPass();
@@ -236,6 +237,7 @@ void registerTestPasses() {
mlir::test::registerTestMatchReductionPass();
mlir::test::registerTestMathAlgebraicSimplificationPass();
mlir::test::registerTestMathPolynomialApproximationPass();
+ mlir::test::registerTestMathToVCIXPass();
mlir::test::registerTestMemRefDependenceCheck();
mlir::test::registerTestMemRefStrideCalculation();
mlir::test::registerTestMultiIndexOpLoweringPass();