[llvm-branch-commits] [llvm] fc7b7fc - [RISCV] Add intrinsics for vmv.v.v, vmv.v.x, and vmv.v.i
Craig Topper via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Dec 18 09:54:44 PST 2020
Author: Craig Topper
Date: 2020-12-18T09:49:07-08:00
New Revision: fc7b7fc066946f83b04928d80242fcffbf23323e
URL: https://github.com/llvm/llvm-project/commit/fc7b7fc066946f83b04928d80242fcffbf23323e
DIFF: https://github.com/llvm/llvm-project/commit/fc7b7fc066946f83b04928d80242fcffbf23323e.diff
LOG: [RISCV] Add intrinsics for vmv.v.v, vmv.v.x, and vmv.v.i
We worked with @rogfer01 from BSC to produce this patch.
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Craig Topper <craig.topper at sifive.com>
Differential Revision: https://reviews.llvm.org/D93514
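
In short, each intrinsic takes the source operand (a vector for vmv.v.v, a scalar for vmv.v.x) plus an explicit vector-length operand, and returns the destination vector. A minimal IR-level usage sketch, mirroring the rv32 tests added below (the wrapper name @splat_example is illustrative; on riscv64 the VL operand is i64 rather than i32):

declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(i8, i32)

define <vscale x 1 x i8> @splat_example(i8 %x, i32 %vl) nounwind {
  ; Lowers to vmv.v.x; a constant scalar that fits in simm5 selects vmv.v.i instead.
  %v = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(i8 %x, i32 %vl)
  ret <vscale x 1 x i8> %v
}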
Added:
llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 6d20f39a4020..c207100adb7e 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -229,6 +229,14 @@ let TargetPrefix = "riscv" in {
let ExtendOperand = 3;
}
+ // For vmv.v.v, vmv.v.x, vmv.v.i
+ // Input: (vector_in/scalar_in, vl)
+ class RISCVUnary : Intrinsic<[llvm_anyvector_ty],
+ [llvm_any_ty, llvm_anyint_ty],
+ [IntrNoMem] >, RISCVVIntrinsic {
+ let ExtendOperand = 1;
+ }
+
multiclass RISCVUSLoad {
def "int_riscv_" # NAME : RISCVUSLoad;
def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -328,4 +336,7 @@ let TargetPrefix = "riscv" in {
defm vsadd : RISCVSaturatingBinaryAAX;
defm vssubu : RISCVSaturatingBinaryAAX;
defm vssub : RISCVSaturatingBinaryAAX;
+
+ def int_riscv_vmv_v_v : RISCVUnary;
+ def int_riscv_vmv_v_x : RISCVUnary;
} // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 3363aed34f39..bd4694633791 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -401,6 +401,22 @@ class VPseudoSStoreMask<VReg StClass>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+// Unary instruction that is never masked so HasDummyMask=0.
+class VPseudoUnaryNoDummyMask<VReg RetClass,
+ DAGOperand Op2Class> :
+ Pseudo<(outs RetClass:$rd),
+ (ins Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
+ let Uses = [VL, VTYPE];
+ let VLIndex = 2;
+ let SEWIndex = 3;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
class VPseudoBinaryNoMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
@@ -621,6 +637,16 @@ multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
m.vrclass, simm5, m, CarryIn, Constraint>;
}
+multiclass VPseudoUnaryV_V_X_I_NoDummyMask {
+ foreach m = MxList.m in {
+ let VLMul = m.value in {
+ def "_V_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>;
+ def "_X_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, GPR>;
+ def "_I_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, simm5>;
+ }
+ }
+}
+
multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
defm "" : VPseudoBinaryV_VV;
defm "" : VPseudoBinaryV_VX</*IsFloat=*/0>;
@@ -1295,6 +1321,12 @@ defm PseudoVWMUL : VPseudoBinaryW_VV_VX;
defm PseudoVWMULU : VPseudoBinaryW_VV_VX;
defm PseudoVWMULSU : VPseudoBinaryW_VV_VX;
+//===----------------------------------------------------------------------===//
+// 12.17. Vector Integer Move Instructions
+//===----------------------------------------------------------------------===//
+
+defm PseudoVMV_V : VPseudoUnaryV_V_X_I_NoDummyMask;
+
//===----------------------------------------------------------------------===//
// 13.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
@@ -1444,6 +1476,25 @@ defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU">;
+//===----------------------------------------------------------------------===//
+// 12.17. Vector Integer Move Instructions
+//===----------------------------------------------------------------------===//
+foreach vti = AllVectors in {
+ def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
+ GPR:$vl)),
+ (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
+ $rs1, (NoX0 GPR:$vl), vti.SEW)>;
+}
+
+foreach vti = AllIntegerVectors in {
+ def : Pat<(vti.Vector (int_riscv_vmv_v_x GPR:$rs2, GPR:$vl)),
+ (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
+ $rs2, (NoX0 GPR:$vl), vti.SEW)>;
+ def : Pat<(vti.Vector (int_riscv_vmv_v_x simm5:$imm5, GPR:$vl)),
+ (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
+ simm5:$imm5, (NoX0 GPR:$vl), vti.SEW)>;
+}
+
//===----------------------------------------------------------------------===//
// 13.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
new file mode 100644
index 000000000000..d22ac605a20b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
@@ -0,0 +1,505 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+ i8,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8_i8(i8 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,mf8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+ i8 %0,
+ i32 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+ i8,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8_i8(i8 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,mf4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+ i8 %0,
+ i32 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+ i8,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8_i8(i8 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,mf2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+ i8 %0,
+ i32 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+ i8,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8_i8(i8 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,m1
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+ i8 %0,
+ i32 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+ i8,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8_i8(i8 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,m2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+ i8 %0,
+ i32 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+ i8,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8_i8(i8 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,m4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+ i8 %0,
+ i32 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+ i8,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8_i8(i8 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,m8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+ i8 %0,
+ i32 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+ i16,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16_i16(i16 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,mf4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+ i16 %0,
+ i32 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+ i16,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16_i16(i16 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,mf2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+ i16 %0,
+ i32 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+ i16,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16_i16(i16 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,m1
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+ i16 %0,
+ i32 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+ i16,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16_i16(i16 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,m2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+ i16 %0,
+ i32 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+ i16,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16_i16(i16 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,m4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+ i16 %0,
+ i32 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+ i16,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16_i16(i16 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,m8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+ i16 %0,
+ i32 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+ i32,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32_i32(i32 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,mf2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+ i32 %0,
+ i32 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+ i32,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32_i32(i32 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,m1
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+ i32 %0,
+ i32 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+ i32,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32_i32(i32 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,m2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+ i32 %0,
+ i32 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+ i32,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32_i32(i32 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,m4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+ i32 %0,
+ i32 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+ i32,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32_i32(i32 %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,m8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+ i32 %0,
+ i32 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8_i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+ i8 9,
+ i32 %0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8_i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+ i8 9,
+ i32 %0)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8_i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+ i8 9,
+ i32 %0)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8_i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,m1
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+ i8 9,
+ i32 %0)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8_i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,m2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+ i8 9,
+ i32 %0)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8_i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,m4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+ i8 9,
+ i32 %0)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8_i8(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,m8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+ i8 9,
+ i32 %0)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16_i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+ i16 9,
+ i32 %0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16_i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+ i16 9,
+ i32 %0)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16_i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m1
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+ i16 9,
+ i32 %0)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16_i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+ i16 9,
+ i32 %0)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16_i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+ i16 9,
+ i32 %0)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16_i16(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+ i16 9,
+ i32 %0)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32_i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,mf2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+ i32 9,
+ i32 %0)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32_i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m1
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+ i32 9,
+ i32 %0)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32_i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+ i32 9,
+ i32 %0)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32_i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+ i32 9,
+ i32 %0)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32_i32(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+ i32 9,
+ i32 %0)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
new file mode 100644
index 000000000000..21b22f6c3f0f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
@@ -0,0 +1,617 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+ i8,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8_i8(i8 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,mf8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+ i8 %0,
+ i64 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+ i8,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8_i8(i8 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,mf4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+ i8 %0,
+ i64 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+ i8,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8_i8(i8 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,mf2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+ i8 %0,
+ i64 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+ i8,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8_i8(i8 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,m1
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+ i8 %0,
+ i64 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+ i8,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8_i8(i8 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,m2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+ i8 %0,
+ i64 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+ i8,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8_i8(i8 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,m4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+ i8 %0,
+ i64 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+ i8,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8_i8(i8 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, a1, e8,m8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+ i8 %0,
+ i64 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+ i16,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16_i16(i16 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,mf4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+ i16 %0,
+ i64 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+ i16,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16_i16(i16 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,mf2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+ i16 %0,
+ i64 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+ i16,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16_i16(i16 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,m1
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+ i16 %0,
+ i64 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+ i16,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16_i16(i16 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,m2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+ i16 %0,
+ i64 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+ i16,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16_i16(i16 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,m4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+ i16 %0,
+ i64 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+ i16,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16_i16(i16 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, a1, e16,m8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+ i16 %0,
+ i64 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+ i32,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32_i32(i32 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,mf2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+ i32 %0,
+ i64 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+ i32,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32_i32(i32 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,m1
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+ i32 %0,
+ i64 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+ i32,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32_i32(i32 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,m2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+ i32 %0,
+ i64 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+ i32,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32_i32(i32 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,m4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+ i32 %0,
+ i64 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+ i32,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32_i32(i32 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, a1, e32,m8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+ i32 %0,
+ i64 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64_i64(i64 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, a1, e64,m1
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(
+ i64 %0,
+ i64 %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vmv.v.x_x_nxv2i64_i64(i64 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, a1, e64,m2
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(
+ i64 %0,
+ i64 %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vmv.v.x_x_nxv4i64_i64(i64 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, a1, e64,m4
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(
+ i64 %0,
+ i64 %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vmv.v.x_x_nxv8i64_i64(i64 %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, a1, e64,m8
+; CHECK: vmv.v.x {{v[0-9]+}}, a0
+ %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(
+ i64 %0,
+ i64 %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8_i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+ i8 9,
+ i64 %0)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8_i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+ i8 9,
+ i64 %0)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8_i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+ i8 9,
+ i64 %0)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8_i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,m1
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+ i8 9,
+ i64 %0)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8_i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,m2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+ i8 9,
+ i64 %0)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8_i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,m4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+ i8 9,
+ i64 %0)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8_i8(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,m8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+ i8 9,
+ i64 %0)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16_i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+ i16 9,
+ i64 %0)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16_i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+ i16 9,
+ i64 %0)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16_i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m1
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+ i16 9,
+ i64 %0)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16_i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+ i16 9,
+ i64 %0)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16_i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+ i16 9,
+ i64 %0)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16_i16(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+ i16 9,
+ i64 %0)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32_i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,mf2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+ i32 9,
+ i64 %0)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32_i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m1
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+ i32 9,
+ i64 %0)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32_i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+ i32 9,
+ i64 %0)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32_i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+ i32 9,
+ i64 %0)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32_i32(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+ i32 9,
+ i64 %0)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64_i64(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m1
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(
+ i64 9,
+ i64 %0)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64_i64(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m2
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(
+ i64 9,
+ i64 %0)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64_i64(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m4
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(
+ i64 9,
+ i64 %0)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64_i64(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m8
+; CHECK: vmv.v.i {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(
+ i64 9,
+ i64 %0)
+
+ ret <vscale x 8 x i64> %a
+}
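A note on the _i_ test variants above: each one calls the same llvm.riscv.vmv.v.x intrinsic but passes the literal 9 as the scalar operand, and the CHECK lines expect vmv.v.i rather than vmv.v.x. Because 9 fits in the 5-bit signed immediate (simm5) accepted by the _I_ pseudo, the splat can be selected as the immediate form with no GPR involved. A minimal sketch of the pattern, with an illustrative function name not taken from the patch:

define <vscale x 1 x i8> @splat_nine(i64 %vl) nounwind {
  ; 9 is in the simm5 range [-16, 15], so instruction selection is
  ; expected to emit vmv.v.i with the immediate folded in, instead of
  ; materializing 9 in a scalar register and using vmv.v.x.
  %v = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(i8 9, i64 %vl)
  ret <vscale x 1 x i8> %v
}

A constant outside that range would presumably have to be materialized into a GPR and splat with vmv.v.x; the tests here only cover the in-range case.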