[llvm-branch-commits] [llvm] 86d282b - [RISCV] Add intrinsics for vmv.x.s and vmv.s.x

Craig Topper via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Dec 18 10:36:04 PST 2020


Author: Craig Topper
Date: 2020-12-18T10:30:48-08:00
New Revision: 86d282baede58f675ea6df8d64bfee582b6679b7

URL: https://github.com/llvm/llvm-project/commit/86d282baede58f675ea6df8d64bfee582b6679b7
DIFF: https://github.com/llvm/llvm-project/commit/86d282baede58f675ea6df8d64bfee582b6679b7.diff

LOG: [RISCV] Add intrinsics for vmv.x.s and vmv.s.x

This adds intrinsics for vmv.x.s and vmv.s.x.

I've used stricter type constraints on these intrinsics than we have been using on the arithmetic intrinsics so far. This means we won't need to pass the scalar type to the Intrinsic::getDeclaration call when creating these intrinsics.
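
A minimal sketch of what this buys a caller (not part of this patch; the emitVMVXS helper name and the IRBuilder plumbing are assumptions for illustration): only the vector type needs to be supplied as an overloaded type, because the scalar result type is pinned to the vector's element type by the constraint.

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/IntrinsicsRISCV.h"
  #include "llvm/IR/Module.h"

  // Hypothetical helper: emit a call to llvm.riscv.vmv.x.s. Only the vector
  // operand type is passed to getDeclaration; the scalar result type follows
  // from the intrinsic's LLVMVectorElementType<0> constraint.
  llvm::Value *emitVMVXS(llvm::IRBuilderBase &Builder, llvm::Value *Vec) {
    llvm::Module *M = Builder.GetInsertBlock()->getModule();
    llvm::Function *Decl = llvm::Intrinsic::getDeclaration(
        M, llvm::Intrinsic::riscv_vmv_x_s, {Vec->getType()});
    return Builder.CreateCall(Decl, {Vec});
  }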

A custom ISD node is used for vmv.x.s so that it can be handled in computeNumSignBitsForTargetNode, which lets sign extends of the result be removed.
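
Stated as a standalone sketch (the function below is illustrative, not code from the patch), the rule added to computeNumSignBitsForTargetNode is:

  // The VMV_X_S result is the element sign-extended to XLEN, so every bit
  // from the element's own sign bit upward is known to be a copy of it.
  unsigned numSignBitsForVMVXS(unsigned XLen, unsigned EltBits) {
    return XLen - EltBits + 1; // e.g. RV64 with i16 elements: 64 - 16 + 1 = 49
  }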

I also modified the MC layer description of these instructions to show the tied source/dest operand. This differs from what we do for masked instructions, where we drop the tied source operand when converting to MC, but it is a more accurate description of the instruction. We can't do the same for masked instructions since we use the same MC instruction for masked and unmasked forms. Tools like llvm-mca operate at the MC layer and rely on ins/outs and Uses/Defs for analysis, so I don't know if we'll be able to maintain the current behavior for masked instructions. For now I went with the accurate description here since it was easy.
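
For context, a minimal sketch of how an MC-layer tool could observe the tied operand after this change (the hasTiedOperand helper is hypothetical, not part of the patch):

  #include "llvm/MC/MCInstrDesc.h"
  #include "llvm/MC/MCInstrInfo.h"

  // Hypothetical helper: returns true if any operand of the instruction is
  // tied to an earlier operand, as VMV_S_X's $vd now is after this change.
  bool hasTiedOperand(const llvm::MCInstrInfo &MCII, unsigned Opcode) {
    const llvm::MCInstrDesc &Desc = MCII.get(Opcode);
    for (unsigned I = 0, E = Desc.getNumOperands(); I != E; ++I)
      if (Desc.getOperandConstraint(I, llvm::MCOI::TIED_TO) != -1)
        return true;
    return false;
  }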

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D93365

Added: 
    llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index c207100adb7e..c6f3a492683e 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -339,4 +339,14 @@ let TargetPrefix = "riscv" in {
 
   def int_riscv_vmv_v_v : RISCVUnary;
   def int_riscv_vmv_v_x : RISCVUnary;
+
+  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
+                                    [llvm_anyint_ty],
+                                    [IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
+                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
+                                     llvm_anyint_ty],
+                                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
 } // TargetPrefix = "riscv"

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6dac023fe337..a484669e7a0b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -348,14 +348,17 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setBooleanVectorContents(ZeroOrOneBooleanContent);
 
     // RVV intrinsics may have illegal operands.
+    // We also need to custom legalize vmv.x.s.
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
+    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
+    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
 
     if (Subtarget.is64Bit()) {
-      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
-      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
+      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
+      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
     }
   }
 
@@ -1039,9 +1042,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
         assert(II->ExtendedOperand < Op.getNumOperands());
         SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
         SDValue &ScalarOp = Operands[II->ExtendedOperand];
-        if (ScalarOp.getValueType() == MVT::i8 ||
-            ScalarOp.getValueType() == MVT::i16 ||
-            ScalarOp.getValueType() == MVT::i32) {
+        EVT OpVT = ScalarOp.getValueType();
+        if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
+            (OpVT == MVT::i32 && Subtarget.is64Bit())) {
           ScalarOp =
               DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
           return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
@@ -1058,6 +1061,10 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     EVT PtrVT = getPointerTy(DAG.getDataLayout());
     return DAG.getRegister(RISCV::X4, PtrVT);
   }
+  case Intrinsic::riscv_vmv_x_s:
+    assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!");
+    return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
+                       Op.getOperand(1));
   }
 }
 
@@ -1077,9 +1084,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
         assert(ExtendOp < Op.getNumOperands());
         SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
         SDValue &ScalarOp = Operands[ExtendOp];
-        if (ScalarOp.getValueType() == MVT::i32 ||
-            ScalarOp.getValueType() == MVT::i16 ||
-            ScalarOp.getValueType() == MVT::i8) {
+        EVT OpVT = ScalarOp.getValueType();
+        if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
+            (OpVT == MVT::i32 && Subtarget.is64Bit())) {
           ScalarOp =
               DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
           return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), Operands);
@@ -1309,6 +1316,25 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
     break;
   }
+  case ISD::INTRINSIC_WO_CHAIN: {
+    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+    switch (IntNo) {
+    default:
+      llvm_unreachable(
+          "Don't know how to custom type legalize this intrinsic!");
+    case Intrinsic::riscv_vmv_x_s: {
+      EVT VT = N->getValueType(0);
+      assert((VT == MVT::i8 || VT == MVT::i16 ||
+              (Subtarget.is64Bit() && VT == MVT::i32)) &&
+             "Unexpected custom legalisation!");
+      SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
+                                    Subtarget.getXLenVT(), N->getOperand(1));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
+      break;
+    }
+    }
+    break;
+  }
   }
 }
 
@@ -1730,6 +1756,11 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
     // more precise answer could be calculated for SRAW depending on known
     // bits in the shift amount.
     return 33;
+  case RISCVISD::VMV_X_S:
+    // The number of sign bits of the scalar result is computed by obtaining the
+    // element type of the input vector operand, subtracting its width from the
+    // XLEN, and then adding one (sign bit within the element type).
+    return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
   }
 
   return 1;
@@ -3369,6 +3400,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(GREVIW)
   NODE_NAME_CASE(GORCI)
   NODE_NAME_CASE(GORCIW)
+  NODE_NAME_CASE(VMV_X_S)
   }
   // clang-format on
   return nullptr;

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index fb81b2454923..28ce459e3027 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -77,6 +77,11 @@ enum NodeType : unsigned {
   GREVIW,
   GORCI,
   GORCIW,
+  // Vector Extension
+  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT
+  // sign extended from the vector element size. NOTE: The result size will
+  // never be less than the vector element size.
+  VMV_X_S,
 };
 } // namespace RISCVISD
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 3164326e9511..3c587636144f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -967,8 +967,9 @@ def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
 let vm = 1 in {
 def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">;
-def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd),
-                      (ins GPR:$rs1), "vmv.s.x", "$vd, $rs1">;
+let Constraints = "$vd = $vd_wb" in
+def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
+                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">;
 
 }
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index bd4694633791..4927b82ec661 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -14,6 +14,10 @@
 ///
 //===----------------------------------------------------------------------===//
 
+def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
+                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
+                                                SDTCisInt<1>]>>;
+
 // X0 has special meaning for vsetvl/vsetvli.
 //  rd | rs1 |   AVL value | Effect on vl
 //--------------------------------------------------------------
@@ -1349,6 +1353,30 @@ defm PseudoVFRSUB      : VPseudoBinaryV_VX</*IsFloat=*/1>;
 
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+//===----------------------------------------------------------------------===//
+// 17.1. Integer Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV] in {
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
+    Uses = [VL, VTYPE] in {
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      let SEWIndex = 2, BaseInstr = VMV_X_S in
+      def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd),
+                                             (ins m.vrclass:$rs2, ixlenimm:$sew),
+                                             []>, RISCVVPseudo;
+      let VLIndex = 3, SEWIndex = 4, BaseInstr = VMV_S_X,
+          Constraints = "$rd = $rs1" in
+      def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
+                                             (ins m.vrclass:$rs1, GPR:$rs2,
+                                             GPR:$vl, ixlenimm:$sew),
+                                             []>, RISCVVPseudo;
+    }
+  }
+}
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -1514,3 +1542,18 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
 defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;
 
 } // Predicates = [HasStdExtV, HasStdExtF]
+
+//===----------------------------------------------------------------------===//
+// 17.1. Integer Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV] in {
+foreach vti = AllIntegerVectors in {
+  def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
+            (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.SEW)>;
+  def : Pat<(vti.Vector (int_riscv_vmv_s_x (vti.Vector vti.RegClass:$rs1),
+                                           GPR:$rs2, GPR:$vl)),
+            (!cast<Instruction>("PseudoVMV_S_X_" # vti.LMul.MX)
+             (vti.Vector $rs1), $rs2, (NoX0 GPR:$vl), vti.SEW)>;
+}
+} // Predicates = [HasStdExtV]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
new file mode 100644
index 000000000000..346828906264
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
@@ -0,0 +1,236 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i32)
+
+define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2)
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i32)
+
+define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2)
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i32)
+
+define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2)
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i32)
+
+define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2)
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i32)
+
+define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2)
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i32)
+
+define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2)
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i32)
+
+define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2)
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16, i32)
+
+define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16, i32)
+
+define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16, i32)
+
+define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16, i32)
+
+define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i16, i32)
+
+define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i16, i32)
+
+define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32, i32)
+
+define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32, i32)
+
+define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32, i32)
+
+define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32, i32)
+
+define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i32, i32)
+
+define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2)
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
new file mode 100644
index 000000000000..986ef7602ee6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i64);
+
+define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2)
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i64);
+
+define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8> %0, i8 %1, i64 %2)
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i64);
+
+define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8> %0, i8 %1, i64 %2)
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i64);
+
+define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8> %0, i8 %1, i64 %2)
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i64);
+
+define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2)
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i64);
+
+define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2)
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i64);
+
+define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2)
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16, i64);
+
+define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16> %0, i16 %1, i64 %2)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16, i64);
+
+define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16> %0, i16 %1, i64 %2)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16, i64);
+
+define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16> %0, i16 %1, i64 %2)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16, i64);
+
+define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i16, i64);
+
+define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i16, i64);
+
+define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2)
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32, i64);
+
+define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32> %0, i32 %1, i64 %2)
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32, i64);
+
+define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32> %0, i32 %1, i64 %2)
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32, i64);
+
+define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2)
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32, i64);
+
+define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2)
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i32, i64);
+
+define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2)
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, i64);
+
+define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2)
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64>, i64, i64);
+
+define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2)
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64>, i64, i64);
+
+define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2)
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64>, i64, i64);
+
+define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.s.x v16, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2)
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
new file mode 100644
index 000000000000..5d91fc1d45af
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
@@ -0,0 +1,236 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> %0)
+  ret i8 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv16i16( <vscale x 16 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16> %0)
+  ret i16 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32> %0)
+  ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32> %0)
+  ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32> %0)
+  ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32> %0)
+  ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32>)
+
+define i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32> %0)
+  ret i32 %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll
new file mode 100644
index 000000000000..b773ad67a24a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll
@@ -0,0 +1,288 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %0)
+  ret i8 %a
+}
+
+declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)
+
+define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> %0)
+  ret i8 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv16i16( <vscale x 16 x i16> %0)
+  ret i16 %a
+}
+
+declare i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16>)
+
+define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16> %0)
+  ret i16 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32> %0)
+  ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32> %0)
+  ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32> %0)
+  ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32> %0)
+  ret i32 %a
+}
+
+declare i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32>)
+
+define signext i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32> %0)
+  ret i32 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv1i64( <vscale x 1 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv1i64(<vscale x 1 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64( <vscale x 1 x i64> %0)
+  ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv2i64( <vscale x 2 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv2i64(<vscale x 2 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i64 @llvm.riscv.vmv.x.s.nxv2i64( <vscale x 2 x i64> %0)
+  ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv4i64( <vscale x 4 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv4i64(<vscale x 4 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i64 @llvm.riscv.vmv.x.s.nxv4i64( <vscale x 4 x i64> %0)
+  ret i64 %a
+}
+
+declare i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64>)
+
+define i64 @intrinsic_vmv.x.s_s_nxv8i64(<vscale x 8 x i64> %0) nounwind {
+; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.x.s a0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> %0)
+  ret i64 %a
+}


        

