[llvm] 5baef63 - [RISCV] Initial infrastructure for code generation of the RISC-V V-extension

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 4 11:44:01 PST 2020


Author: Craig Topper
Date: 2020-12-04T11:39:30-08:00
New Revision: 5baef6353e8819e443d327f84edc9f2d1c8c0c9e

URL: https://github.com/llvm/llvm-project/commit/5baef6353e8819e443d327f84edc9f2d1c8c0c9e
DIFF: https://github.com/llvm/llvm-project/commit/5baef6353e8819e443d327f84edc9f2d1c8c0c9e.diff

LOG: [RISCV] Initial infrastructure for code generation of the RISC-V V-extension

The companion RFC (http://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html) gives full details on the overall strategy; we summarize it here:

- LLVM IR involving vector types is going to be selected using pseudo instructions (MachineInstr only). These pseudo instructions contain dummy operands that represent the vector type being operated on and the vector length for the operation.
- These two dummy operands, as set by instruction selection, are used by the custom inserter to prepend every operation with an appropriate vsetvli instruction that ensures the vector unit is properly configured for the operation. Not in this patch: later passes will remove the redundant vsetvli instructions.
- Register classes of tuples of vector registers are used to represent vector register groups (LMUL > 1).
- Those pseudos are eventually lowered into the actual instructions when emitting the MCInsts.
About the patch:

Because a fair amount of initial infrastructure is required, this is the minimal patch that allows us to select code for three LLVM IR instructions: load, add, and store of scalable vectors of integers. These LLVM IR operations have "whole-vector" semantics (that is, they produce values for all the elements).
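
For illustration, this is the kind of function that can now be compiled with
-mattr=+experimental-v (it mirrors the vadd_vint64m1 test added in this patch);
each vector operation is selected to an RVV pseudo and the custom inserter
prepends a vsetvli to each of them, so for now the generated code contains one
vsetvli per operation:

  define void @vadd_vint64m1(<vscale x 1 x i64> *%pc,
                             <vscale x 1 x i64> *%pa,
                             <vscale x 1 x i64> *%pb) {
    %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
    %vb = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pb
    %vc = add <vscale x 1 x i64> %va, %vb
    store <vscale x 1 x i64> %vc, <vscale x 1 x i64> *%pc
    ret void
  }

  ; Generated code (from the load-add-store-64.ll check lines below):
  ;   vsetvli a3, zero, e64,m1,tu,mu
  ;   vle64.v v25, (a1)
  ;   vsetvli a1, zero, e64,m1,tu,mu
  ;   vle64.v v26, (a2)
  ;   vsetvli a1, zero, e64,m1,tu,mu
  ;   vadd.vv v25, v25, v26
  ;   vsetvli a1, zero, e64,m1,tu,mu
  ;   vse64.v v25, (a0)
  ;   ret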

Later patches will extend the information represented in TableGen.

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Evandro Menezes <evandro.menezes at sifive.com>
Co-Authored-by: Craig Topper <craig.topper at sifive.com>

Differential Revision: https://reviews.llvm.org/D89449

Added: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
    llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
    llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
    llvm/lib/Target/RISCV/RISCVRegisterInfo.td
    llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
    llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
    llvm/utils/TableGen/GlobalISelEmitter.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 504355fb8bf8..660ae915f7b8 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -59,6 +59,7 @@ class RISCVExpandPseudo : public MachineFunctionPass {
   bool expandLoadTLSGDAddress(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI,
                               MachineBasicBlock::iterator &NextMBBI);
+  bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
 };
 
 char RISCVExpandPseudo::ID = 0;
@@ -99,6 +100,8 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
     return expandLoadTLSIEAddress(MBB, MBBI, NextMBBI);
   case RISCV::PseudoLA_TLS_GD:
     return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
+  case RISCV::PseudoVSETVLI:
+    return expandVSetVL(MBB, MBBI);
   }
 
   return false;
@@ -188,6 +191,28 @@ bool RISCVExpandPseudo::expandLoadTLSGDAddress(
                              RISCV::ADDI);
 }
 
+bool RISCVExpandPseudo::expandVSetVL(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator MBBI) {
+  assert(MBBI->getNumOperands() == 5 && "Unexpected instruction format");
+
+  DebugLoc DL = MBBI->getDebugLoc();
+
+  assert(MBBI->getOpcode() == RISCV::PseudoVSETVLI &&
+         "Unexpected pseudo instruction");
+  const MCInstrDesc &Desc = TII->get(RISCV::VSETVLI);
+  assert(Desc.getNumOperands() == 3 && "Unexpected instruction format");
+
+  Register DstReg = MBBI->getOperand(0).getReg();
+  bool DstIsDead = MBBI->getOperand(0).isDead();
+  BuildMI(MBB, MBBI, DL, Desc)
+      .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+      .add(MBBI->getOperand(1))  // VL
+      .add(MBBI->getOperand(2)); // VType
+
+  MBBI->eraseFromParent(); // The pseudo instruction is gone now.
+  return true;
+}
+
 } // end of anonymous namespace
 
 INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8768f6b652b8..e9a7e4e45bdd 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -89,6 +89,53 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasStdExtD())
     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
 
+  if (Subtarget.hasStdExtV()) {
+    addRegisterClass(RISCVVMVTs::vbool64_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vbool32_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vbool16_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vbool8_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vbool4_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vbool2_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vbool1_t, &RISCV::VRRegClass);
+
+    addRegisterClass(RISCVVMVTs::vint8mf8_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint8mf4_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint8mf2_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint8m1_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint8m2_t, &RISCV::VRM2RegClass);
+    addRegisterClass(RISCVVMVTs::vint8m4_t, &RISCV::VRM4RegClass);
+    addRegisterClass(RISCVVMVTs::vint8m8_t, &RISCV::VRM8RegClass);
+
+    addRegisterClass(RISCVVMVTs::vint16mf4_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint16mf2_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint16m1_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint16m2_t, &RISCV::VRM2RegClass);
+    addRegisterClass(RISCVVMVTs::vint16m4_t, &RISCV::VRM4RegClass);
+    addRegisterClass(RISCVVMVTs::vint16m8_t, &RISCV::VRM8RegClass);
+
+    addRegisterClass(RISCVVMVTs::vint32mf2_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint32m1_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint32m2_t, &RISCV::VRM2RegClass);
+    addRegisterClass(RISCVVMVTs::vint32m4_t, &RISCV::VRM4RegClass);
+    addRegisterClass(RISCVVMVTs::vint32m8_t, &RISCV::VRM8RegClass);
+
+    addRegisterClass(RISCVVMVTs::vint64m1_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vint64m2_t, &RISCV::VRM2RegClass);
+    addRegisterClass(RISCVVMVTs::vint64m4_t, &RISCV::VRM4RegClass);
+    addRegisterClass(RISCVVMVTs::vint64m8_t, &RISCV::VRM8RegClass);
+
+    addRegisterClass(RISCVVMVTs::vfloat32mf2_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vfloat32m1_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vfloat32m2_t, &RISCV::VRM2RegClass);
+    addRegisterClass(RISCVVMVTs::vfloat32m4_t, &RISCV::VRM4RegClass);
+    addRegisterClass(RISCVVMVTs::vfloat32m8_t, &RISCV::VRM8RegClass);
+
+    addRegisterClass(RISCVVMVTs::vfloat64m1_t, &RISCV::VRRegClass);
+    addRegisterClass(RISCVVMVTs::vfloat64m2_t, &RISCV::VRM2RegClass);
+    addRegisterClass(RISCVVMVTs::vfloat64m4_t, &RISCV::VRM4RegClass);
+    addRegisterClass(RISCVVMVTs::vfloat64m8_t, &RISCV::VRM8RegClass);
+  }
+
   // Compute derived properties from the register classes.
   computeRegisterProperties(STI.getRegisterInfo());
 
@@ -284,6 +331,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
   setBooleanContents(ZeroOrOneBooleanContent);
 
+  if (Subtarget.hasStdExtV())
+    setBooleanVectorContents(ZeroOrOneBooleanContent);
+
   // Function alignments.
   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
   setMinFunctionAlignment(FunctionAlignment);
@@ -1856,9 +1906,95 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
   return TailMBB;
 }
 
+static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
+                                    int VLIndex, unsigned SEWIndex,
+                                    unsigned VLMul) {
+  MachineFunction &MF = *BB->getParent();
+  DebugLoc DL = MI.getDebugLoc();
+  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+
+  unsigned SEW = MI.getOperand(SEWIndex).getImm();
+  RISCVVLengthMultiplier::LengthMultiplier Multiplier;
+
+  switch (VLMul) {
+  default:
+    llvm_unreachable("Unexpected LMUL for instruction");
+  case 0:
+  case 1:
+  case 2:
+  case 3:
+  case 5:
+  case 6:
+  case 7:
+    Multiplier = static_cast<RISCVVLengthMultiplier::LengthMultiplier>(VLMul);
+    break;
+  }
+
+  RISCVVStandardElementWidth::StandardElementWidth ElementWidth;
+  switch (SEW) {
+  default:
+    llvm_unreachable("Unexpected SEW for instruction");
+  case 8:
+    ElementWidth = RISCVVStandardElementWidth::ElementWidth8;
+    break;
+  case 16:
+    ElementWidth = RISCVVStandardElementWidth::ElementWidth16;
+    break;
+  case 32:
+    ElementWidth = RISCVVStandardElementWidth::ElementWidth32;
+    break;
+  case 64:
+    ElementWidth = RISCVVStandardElementWidth::ElementWidth64;
+    break;
+  }
+
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+
+  // VL and VTYPE are alive here.
+  MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI));
+
+  if (VLIndex >= 0) {
+    // Set VL (rs1 != X0).
+    unsigned DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+    MIB.addReg(DestReg, RegState::Define | RegState::Dead)
+       .addReg(MI.getOperand(VLIndex).getReg());
+  } else
+    // With no VL operand in the pseudo, do not modify VL (rd = X0, rs1 = X0).
+    MIB.addReg(RISCV::X0, RegState::Dead)
+       .addReg(RISCV::X0, RegState::Kill);
+
+  // For simplicity we reuse the vtype representation here.
+  // Bits | Name       | Description
+  // -----+------------+------------------------------------------------
+  // 5    | vlmul[2]   | Fractional lmul?
+  // 4:2  | vsew[2:0]  | Standard element width (SEW) setting
+  // 1:0  | vlmul[1:0] | Vector register group multiplier (LMUL) setting
+  MIB.addImm(((Multiplier & 0x4) << 3) | ((ElementWidth & 0x3) << 2) |
+             (Multiplier & 0x3));
+
+  // Remove (now) redundant operands from pseudo
+  MI.getOperand(SEWIndex).setImm(-1);
+  if (VLIndex >= 0) {
+    MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
+    MI.getOperand(VLIndex).setIsKill(false);
+  }
+
+  return BB;
+}
+
 MachineBasicBlock *
 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
+
+  if (const RISCVVPseudosTable::PseudoInfo *RVV =
+          RISCVVPseudosTable::getPseudoInfo(MI.getOpcode())) {
+    int VLIndex = RVV->getVLIndex();
+    int SEWIndex = RVV->getSEWIndex();
+
+    assert(SEWIndex >= 0 && "SEWIndex must be >= 0");
+    return addVSetVL(MI, BB, VLIndex, SEWIndex, RVV->VLMul);
+  }
+
   switch (MI.getOpcode()) {
   default:
     llvm_unreachable("Unexpected instr type to insert");
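
(Aside, not part of the diff: with the vtype layout used by addVSetVL above, the
e64,m1 configuration exercised by the new tests encodes as
((LMul1 & 0x4) << 3) | ((ElementWidth64 & 0x3) << 2) | (LMul1 & 0x3)
  = ((0 & 0x4) << 3) | ((3 & 0x3) << 2) | (0 & 0x3) = 12,
which is the "PseudoVSETVLI ..., 12" immediate visible in the MIR checks of the
added tests.)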

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index e3dcaf0ac997..220aa9acc771 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -1127,3 +1127,5 @@ let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in {
   defm VAMOMINUEI64 : VAMO<AMOOPVamoMinu, LSWidth64, "vamominuei64.v">;
   defm VAMOMAXUEI64 : VAMO<AMOOPVamoMaxu, LSWidth64, "vamomaxuei64.v">;
 } // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64]
+
+include "RISCVInstrInfoVPseudos.td"

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
new file mode 100644
index 000000000000..f4e0a6f3b82d
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -0,0 +1,308 @@
+//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains the required infrastructure to support code generation
+/// for the standard 'V' (Vector) extension, version 0.9.  This version is still
+/// experimental as the 'V' extension hasn't been ratified yet.
+///
+/// This file is included from RISCVInstrInfoV.td
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Utilities.
+//===----------------------------------------------------------------------===//
+
+// This class describes information associated to the LMUL.
+class LMULInfo<int lmul, VReg regclass, string mx> {
+  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
+  VReg vrclass = regclass;
+  string MX = mx;
+}
+
+// Associate LMUL with tablegen records of register classes.
+def V_M1  : LMULInfo<0b000,   VR,  "M1">;
+def V_M2  : LMULInfo<0b001, VRM2,  "M2">;
+def V_M4  : LMULInfo<0b010, VRM4,  "M4">;
+def V_M8  : LMULInfo<0b011, VRM8,  "M8">;
+
+def V_MF8 : LMULInfo<0b101,   VR, "MF8">;
+def V_MF4 : LMULInfo<0b110,   VR, "MF4">;
+def V_MF2 : LMULInfo<0b111,   VR, "MF2">;
+
+// Used to iterate over all possible LMULs.
+def MxList {
+  list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
+}
+
+class shift_amount<int num> {
+  int val = !if(!eq(num, 1), 0, !add(1, shift_amount<!srl(num, 1)>.val));
+}
+
+// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
+def VLMax : OutPatFrag<(ops), (XLenVT X0)>;
+
+// List of EEW.
+defvar EEWList = [8, 16, 32, 64];
+
+//===----------------------------------------------------------------------===//
+// Vector register and vector group type information.
+//===----------------------------------------------------------------------===//
+
+class VectorTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M>
+{
+  ValueType Vector = Vec;
+  ValueType Mask = Mas;
+  int SEW = Sew;
+  VReg RegClass = Reg;
+  LMULInfo LMul = M;
+}
+
+class GroupVectorTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas,
+                          int Sew, VReg Reg, LMULInfo M>
+    : VectorTypeInfo<Vec, Mas, Sew, Reg, M>
+{
+  ValueType VectorM1 = VecM1;
+}
+
+defset list<VectorTypeInfo> AllVectors = {
+  defset list<VectorTypeInfo> AllIntegerVectors = {
+    def VtypeInt8MF8  : VectorTypeInfo<vint8mf8_t,  vbool64_t,  8, VR, V_MF8>;
+    def VtypeInt8MF4  : VectorTypeInfo<vint8mf4_t,  vbool32_t,  8, VR, V_MF4>;
+    def VtypeInt8MF2  : VectorTypeInfo<vint8mf2_t,  vbool16_t,  8, VR, V_MF2>;
+    def VtypeInt8M1   : VectorTypeInfo<vint8m1_t,   vbool8_t,   8, VR, V_M1>;
+    def VtypeInt16MF4 : VectorTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
+    def VtypeInt16MF2 : VectorTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
+    def VtypeInt16M1  : VectorTypeInfo<vint16m1_t,  vbool16_t, 16, VR, V_M1>;
+    def VtypeInt32MF2 : VectorTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
+    def VtypeInt32M1  : VectorTypeInfo<vint32m1_t,  vbool32_t, 32, VR, V_M1>;
+    def VtypeInt64M1  : VectorTypeInfo<vint64m1_t,  vbool64_t, 64, VR, V_M1>;
+
+    def VtypeInt8M2  : GroupVectorTypeInfo<vint8m2_t,  vint8m1_t,  vbool4_t, 8,  VRM2, V_M2>;
+    def VtypeInt8M4  : GroupVectorTypeInfo<vint8m4_t,  vint8m1_t,  vbool2_t, 8,  VRM4, V_M4>;
+    def VtypeInt8M8  : GroupVectorTypeInfo<vint8m8_t,  vint8m1_t,  vbool1_t, 8,  VRM8, V_M8>;
+
+    def VtypeInt16M2  : GroupVectorTypeInfo<vint16m2_t, vint16m1_t, vbool8_t, 16, VRM2, V_M2>;
+    def VtypeInt16M4 : GroupVectorTypeInfo<vint16m4_t, vint16m1_t, vbool4_t, 16, VRM4, V_M4>;
+    def VtypeInt16M8 : GroupVectorTypeInfo<vint16m8_t, vint16m1_t, vbool2_t, 16, VRM8, V_M8>;
+
+    def VtypeInt32M2  : GroupVectorTypeInfo<vint32m2_t, vint32m1_t, vbool16_t, 32, VRM2, V_M2>;
+    def VtypeInt32M4  : GroupVectorTypeInfo<vint32m4_t, vint32m1_t, vbool8_t,  32, VRM4, V_M4>;
+    def VtypeInt32M8 : GroupVectorTypeInfo<vint32m8_t, vint32m1_t, vbool4_t,  32, VRM8, V_M8>;
+
+    def VtypeInt64M2  : GroupVectorTypeInfo<vint64m2_t, vint64m1_t, vbool32_t, 64, VRM2, V_M2>;
+    def VtypeInt64M4  : GroupVectorTypeInfo<vint64m4_t, vint64m1_t, vbool16_t, 64, VRM4, V_M4>;
+    def VtypeInt64M8  : GroupVectorTypeInfo<vint64m8_t, vint64m1_t, vbool8_t,  64, VRM8, V_M8>;
+  }
+}
+
+// This class holds the record of the RISCVVPseudoTable below.
+// This represents the information we need in codegen for each pseudo.
+class RISCVVPseudo {
+  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
+  Instruction BaseInstr;
+  bits<8> VLIndex;
+  bits<8> SEWIndex;
+  bits<8> MergeOpIndex;
+  bits<3> VLMul;
+}
+
+// The actual table.
+def RISCVVPseudosTable : GenericTable {
+  let FilterClass = "RISCVVPseudo";
+  let CppTypeName = "PseudoInfo";
+  let Fields = [ "Pseudo", "BaseInstr", "VLIndex", "SEWIndex", "MergeOpIndex",
+                 "VLMul" ];
+  let PrimaryKey = [ "Pseudo" ];
+  let PrimaryKeyName = "getPseudoInfo";
+}
+
+//===----------------------------------------------------------------------===//
+// Helpers to define the different pseudo instructions.
+//===----------------------------------------------------------------------===//
+
+multiclass pseudo_binary<VReg result_reg_class,
+                         VReg op1_reg_class,
+                         DAGOperand op2_kind,
+                         LMULInfo vlmul > {
+  let Constraints = "$rd = $merge",
+      Uses = [VL, VTYPE], VLIndex = 5, SEWIndex = 6, MergeOpIndex = 1,
+      BaseInstr = !cast<Instruction>(!subst("Pseudo", "", NAME)) in
+    def "_"# vlmul.MX : Pseudo<(outs result_reg_class:$rd),
+                            (ins result_reg_class:$merge,
+                                 op1_reg_class:$rs2, op2_kind:$rs1,
+                                 VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),
+                            []>,
+                     RISCVVPseudo;
+}
+
+multiclass pseudo_binary_v_vv_vx_vi<Operand imm_type = simm5,
+                                    bit force_earlyclobber = 0> {
+  let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
+  foreach m = MxList.m in
+  {
+    let VLMul = m.value in
+    {
+      defvar evr = m.vrclass;
+      defm _VV : pseudo_binary<evr, evr, evr, m>;
+      defm _VX : pseudo_binary<evr, evr, GPR, m>;
+      defm _VI : pseudo_binary<evr, evr, imm_type, m>;
+    }
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Helpers to define the different patterns.
+//===----------------------------------------------------------------------===//
+
+multiclass pat_vop_binary<SDNode vop,
+                          string instruction_name,
+                          ValueType result_type,
+                          ValueType op_type,
+                          ValueType mask_type,
+                          int sew,
+                          LMULInfo vlmul,
+                          VReg result_reg_class,
+                          VReg op_reg_class,
+                          bit swap = 0>
+{
+  defvar instruction = !cast<Instruction>(instruction_name#"_VV_"# vlmul.MX);
+  def : Pat<(result_type (vop
+                          (op_type op_reg_class:$rs1),
+                          (op_type op_reg_class:$rs2))),
+            (instruction (result_type (IMPLICIT_DEF)),
+                         op_reg_class:$rs1,
+                         op_reg_class:$rs2,
+                         (mask_type zero_reg),
+                         VLMax, sew)>;
+}
+
+multiclass pat_vop_binary_common<SDNode vop,
+                                 string instruction_name,
+                                 list<VectorTypeInfo> vtilist>
+{
+  foreach vti = vtilist in
+  defm : pat_vop_binary<vop, instruction_name,
+                        vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+                        vti.LMul, vti.RegClass, vti.RegClass>;
+}
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions and patterns.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV] in {
+
+//===----------------------------------------------------------------------===//
+// 6. Configuration-Setting Instructions
+//===----------------------------------------------------------------------===//
+
+// Pseudos.
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
+def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), []>;
+
+}
+
+//===----------------------------------------------------------------------===//
+// 7. Vector Loads and Stores
+//===----------------------------------------------------------------------===//
+
+// Pseudos.
+foreach eew = EEWList in {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    defvar vlmul = lmul.value;
+    defvar constraint = "$rd = $merge";
+
+    let mayLoad = 1, mayStore = 0, hasSideEffects = 0,
+        usesCustomInserter = 1,
+        VLMul = vlmul in
+    {
+      let Uses = [VL, VTYPE], VLIndex = 4, SEWIndex = 5, MergeOpIndex = 1,
+          Constraints = constraint,
+          BaseInstr = !cast<Instruction>("VLE" # eew # "_V") in
+          def "PseudoVLE" # eew # "_V_" # LInfo
+            : Pseudo<(outs vreg:$rd),
+                     (ins vreg:$merge, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
+                      ixlenimm:$sew),
+                     []>,
+              RISCVVPseudo;
+    }
+
+    let mayLoad = 0, mayStore = 1, hasSideEffects = 0,
+        usesCustomInserter = 1,
+        VLMul = vlmul in
+    {
+      // Masked stores do not have a merge operand as merge is done in memory
+      let Uses = [VL, VTYPE],
+          VLIndex = 3, SEWIndex = 4, MergeOpIndex = -1,
+          BaseInstr = !cast<Instruction>("VSE" # eew # "_V") in
+        def "PseudoVSE" # eew # "_V_" # LInfo
+            : Pseudo<(outs),
+                     (ins vreg:$rd, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
+                          ixlenimm:$sew),
+                     []>,
+              RISCVVPseudo;
+    }
+  }
+}
+
+// Patterns.
+multiclass pat_load_store<LLVMType type,
+                          LLVMType mask_type,
+                          int sew,
+                          LMULInfo vlmul,
+                          VReg reg_class>
+{
+  defvar load_instr = !cast<Instruction>("PseudoVLE" # sew # "_V_"# vlmul.MX);
+  defvar store_instr = !cast<Instruction>("PseudoVSE" # sew # "_V_"# vlmul.MX);
+  // Load
+  def : Pat<(type (load GPR:$rs1)),
+            (load_instr (type (IMPLICIT_DEF)),
+             GPR:$rs1,
+             (mask_type zero_reg),
+             VLMax, sew)>;
+  def : Pat<(type (load AddrFI:$rs1)),
+             (load_instr (type (IMPLICIT_DEF)),
+             AddrFI:$rs1,
+             (mask_type zero_reg),
+             VLMax, sew)>;
+
+  // Store
+  def : Pat<(store type:$rs2, GPR:$rs1),
+            (store_instr reg_class:$rs2, GPR:$rs1,
+             (mask_type zero_reg),
+              VLMax, sew)>;
+  def : Pat<(store type:$rs2, AddrFI:$rs1),
+            (store_instr reg_class:$rs2, AddrFI:$rs1,
+             (mask_type zero_reg),
+             VLMax, sew)>;
+}
+
+foreach vti = AllVectors in
+{
+  defm : pat_load_store<vti.Vector, vti.Mask,
+                        vti.SEW, vti.LMul, vti.RegClass>;
+}
+
+//===----------------------------------------------------------------------===//
+// 12. Vector Integer Arithmetic Instructions
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 12.1. Vector Single-Width Integer Add and Subtract
+//===----------------------------------------------------------------------===//
+
+// Pseudo instructions.
+defm PseudoVADD        : pseudo_binary_v_vv_vx_vi;
+
+// Whole-register vector patterns.
+defm "" : pat_vop_binary_common<add, "PseudoVADD", AllIntegerVectors>;
+
+} // Predicates = [HasStdExtV]

diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index b1dbcfa7f738..da0725623b45 100644
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCV.h"
+#include "RISCVSubtarget.h"
 #include "MCTargetDesc/RISCVMCExpr.h"
 #include "llvm/CodeGen/AsmPrinter.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
@@ -125,8 +126,72 @@ bool llvm::LowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
   return true;
 }
 
+static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
+                                            MCInst &OutMI) {
+  const RISCVVPseudosTable::PseudoInfo *RVV =
+      RISCVVPseudosTable::getPseudoInfo(MI->getOpcode());
+  if (!RVV)
+    return false;
+
+  OutMI.setOpcode(RVV->BaseInstr);
+
+  const MachineBasicBlock *MBB = MI->getParent();
+  assert(MBB && "MI expected to be in a basic block");
+  const MachineFunction *MF = MBB->getParent();
+  assert(MF && "MBB expected to be in a machine function");
+
+  const TargetRegisterInfo *TRI =
+      MF->getSubtarget<RISCVSubtarget>().getRegisterInfo();
+  assert(TRI && "TargetRegisterInfo expected");
+
+  for (const MachineOperand &MO : MI->explicit_operands()) {
+    int OpNo = (int)MI->getOperandNo(&MO);
+    assert(OpNo >= 0 && "Operand number doesn't fit in an 'int' type");
+
+    // Skip VL, SEW and MergeOp operands
+    if (OpNo == RVV->getVLIndex() || OpNo == RVV->getSEWIndex() ||
+        OpNo == RVV->getMergeOpIndex())
+      continue;
+
+    MCOperand MCOp;
+    switch (MO.getType()) {
+    default:
+      llvm_unreachable("Unknown operand type");
+    case MachineOperand::MO_Register: {
+      unsigned Reg = MO.getReg();
+
+      // Nothing to do on NoRegister operands (used as vector mask operand on
+      // unmasked instructions)
+      if (Reg == RISCV::NoRegister) {
+        MCOp = MCOperand::createReg(Reg);
+        break;
+      }
+
+      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+      if (RC->hasSuperClassEq(&RISCV::VRM2RegClass) ||
+          RC->hasSuperClassEq(&RISCV::VRM4RegClass) ||
+          RC->hasSuperClassEq(&RISCV::VRM8RegClass)) {
+        Reg = TRI->getSubReg(Reg, RISCV::sub_vrm2);
+        assert(Reg && "Subregister does not exist");
+      }
+
+      MCOp = MCOperand::createReg(Reg);
+      break;
+    }
+    case MachineOperand::MO_Immediate:
+      MCOp = MCOperand::createImm(MO.getImm());
+      break;
+    }
+    OutMI.addOperand(MCOp);
+  }
+  return true;
+}
+
 void llvm::LowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
                                           const AsmPrinter &AP) {
+  if (lowerRISCVVMachineInstrToMCInst(MI, OutMI))
+    return;
+
   OutMI.setOpcode(MI->getOpcode());
 
   for (const MachineOperand &MO : MI->operands()) {

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 9590fd46581a..15c91a27b784 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -94,6 +94,11 @@ BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   // variable-sized objects at runtime.
   if (TFI->hasBP(MF))
     markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp
+
+  // V registers for code generation. We handle them manually.
+  markSuperRegs(Reserved, RISCV::VL);
+  markSuperRegs(Reserved, RISCV::VTYPE);
+
   assert(checkAllSuperRegsMarked(Reserved));
   return Reserved;
 }

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 27d6fcb74657..9bf4453097c8 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -270,6 +270,69 @@ def FPR64C : RegisterClass<"RISCV", [f64], 64, (add
   (sequence "F%u_D", 8, 9)
 )>;
 
+// Vector type mapping to LLVM types.
+//
+// Though the V extension allows that VLEN be as small as 8,
+// this approach assumes that VLEN>=64.
+// Additionally, the only supported ELEN values are 32 and 64,
+// thus `vscale` can be defined as VLEN/64,
+// allowing the same types with either ELEN value.
+//
+//         MF8    MF4     MF2     M1      M2      M4       M8
+// i64*    N/A    N/A     N/A     nxv1i64 nxv2i64 nxv4i64  nxv8i64
+// i32     N/A    N/A     nxv1i32 nxv2i32 nxv4i32 nxv8i32  nxv16i32
+// i16     N/A    nxv1i16 nxv2i16 nxv4i16 nxv8i16 nxv16i16 nxv32i16
+// i8      nxv1i8 nxv2i8  nxv4i8  nxv8i8  nxv16i8 nxv32i8  nxv64i8
+// double* N/A    N/A     N/A     nxv1f64 nxv2f64 nxv4f64  nxv8f64
+// float   N/A    N/A     nxv1f32 nxv2f32 nxv4f32 nxv8f32  nxv16f32
+// half    N/A    nxv1f16 nxv2f16 nxv4f16 nxv8f16 nxv16f16 nxv32f16
+// * ELEN=64
+
+defvar vint8mf8_t = nxv1i8;
+defvar vint8mf4_t = nxv2i8;
+defvar vint8mf2_t = nxv4i8;
+defvar vint8m1_t = nxv8i8;
+defvar vint8m2_t = nxv16i8;
+defvar vint8m4_t = nxv32i8;
+defvar vint8m8_t = nxv64i8;
+
+defvar vint16mf4_t = nxv1i16;
+defvar vint16mf2_t = nxv2i16;
+defvar vint16m1_t  = nxv4i16;
+defvar vint16m2_t  = nxv8i16;
+defvar vint16m4_t  = nxv16i16;
+defvar vint16m8_t  = nxv32i16;
+
+defvar vint32mf2_t = nxv1i32;
+defvar vint32m1_t  = nxv2i32;
+defvar vint32m2_t  = nxv4i32;
+defvar vint32m4_t  = nxv8i32;
+defvar vint32m8_t  = nxv16i32;
+
+defvar vint64m1_t = nxv1i64;
+defvar vint64m2_t = nxv2i64;
+defvar vint64m4_t = nxv4i64;
+defvar vint64m8_t = nxv8i64;
+
+defvar vfloat32mf2_t = nxv1f32;
+defvar vfloat32m1_t  = nxv2f32;
+defvar vfloat32m2_t  = nxv4f32;
+defvar vfloat32m4_t  = nxv8f32;
+defvar vfloat32m8_t  = nxv16f32;
+
+defvar vfloat64m1_t = nxv1f64;
+defvar vfloat64m2_t = nxv2f64;
+defvar vfloat64m4_t = nxv4f64;
+defvar vfloat64m8_t = nxv8f64;
+
+defvar vbool1_t  = nxv64i1;
+defvar vbool2_t  = nxv32i1;
+defvar vbool4_t  = nxv16i1;
+defvar vbool8_t  = nxv8i1;
+defvar vbool16_t = nxv4i1;
+defvar vbool32_t = nxv2i1;
+defvar vbool64_t = nxv1i1;
+
 // Vector registers
 let RegAltNameIndices = [ABIRegAltName] in {
   foreach Index = 0-31 in {
@@ -315,44 +378,52 @@ class RegisterTypes<list<ValueType> reg_types> {
   list<ValueType> types = reg_types;
 }
 
-// The order of registers represents the preferred allocation sequence,
-// meaning caller-save regs are listed before callee-save.
-def VR : RegisterClass<"RISCV", [nxv8i8, nxv4i16, nxv2i32, nxv1i64],
-                       64, (add
-    (sequence "V%u", 25, 31),
-    (sequence "V%u", 8, 24),
-    (sequence "V%u", 0, 7)
-  )> {
-  let Size = 64;
-}
-
-def VRNoV0 : RegisterClass<"RISCV", [nxv8i8, nxv4i16, nxv2i32, nxv1i64],
-                           64, (add
-    (sequence "V%u", 25, 31),
-    (sequence "V%u", 8, 24),
-    (sequence "V%u", 1, 7)
-  )> {
-  let Size = 64;
+class VReg<list<ValueType> regTypes, dag regList, int Vlmul>
+  : RegisterClass<"RISCV",
+                  regTypes,
+                  // FIXME: Spill alignment set to 16 bytes.
+                  128,
+                  regList> {
+  int VLMul = Vlmul;
+  int Size = !mul(Vlmul, 64); // FIXME: assuming ELEN=64
 }
 
-def VRM2 : RegisterClass<"RISCV", [nxv16i8, nxv8i16, nxv4i32, nxv2i64], 64,
-                         (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
-                              V18M2, V20M2, V22M2, V24M2, V0M2, V2M2, V4M2, V6M2)> {
-  let Size = 128;
-}
-
-def VRM4 : RegisterClass<"RISCV", [nxv32i8, nxv16i16, nxv8i32, nxv4i64], 64,
-                         (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V0M4, V4M4)> {
-  let Size = 256;
-}
-
-def VRM8 : RegisterClass<"RISCV", [nxv32i16, nxv16i32, nxv8i64], 64,
-                         (add V8M8, V16M8, V24M8, V0M8)> {
-  let Size = 512;
-}
-
-def VMaskVT : RegisterTypes<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, nxv32i1]>;
-
-def VMV0 : RegisterClass<"RISCV", VMaskVT.types, 64, (add V0)> {
+def VR : VReg<[vint8mf2_t, vint8mf4_t, vint8mf8_t,
+               vint16mf2_t, vint16mf4_t, vint32mf2_t,
+               vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
+               vfloat32mf2_t, vfloat32m1_t, vfloat64m1_t,
+               vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t,
+               vbool2_t, vbool1_t],
+           (add (sequence "V%u", 25, 31),
+                (sequence "V%u", 8, 24),
+                (sequence "V%u", 0, 7)), 1>;
+
+def VRNoV0 : VReg<[vint8mf2_t, vint8mf4_t, vint8mf8_t,
+                   vint16mf2_t, vint16mf4_t, vint32mf2_t,
+                   vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
+                   vfloat32mf2_t, vfloat32m1_t, vfloat64m1_t,
+                   vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t,
+                   vbool2_t, vbool1_t],
+               (add (sequence "V%u", 25, 31),
+                    (sequence "V%u", 8, 24),
+                    (sequence "V%u", 1, 7)), 1>;
+
+def VRM2 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
+                 vfloat32m2_t, vfloat64m2_t],
+             (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
+                  V18M2, V20M2, V22M2, V24M2, V0M2, V2M2, V4M2, V6M2), 2>;
+
+def VRM4 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
+                 vfloat32m4_t, vfloat64m4_t],
+             (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V0M4, V4M4), 4>;
+
+def VRM8 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
+                 vfloat32m8_t, vfloat64m8_t],
+             (add V8M8, V16M8, V24M8, V0M8), 8>;
+
+defvar VMaskVTs = [vbool64_t, vbool32_t, vbool16_t, vbool8_t,
+                   vbool4_t, vbool2_t, vbool1_t];
+
+def VMV0 : RegisterClass<"RISCV", VMaskVTs, 64, (add V0)> {
   let Size = 64;
 }

diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
index cc170d9f34ca..5aa7ce16278f 100644
--- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
+++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
@@ -94,4 +94,11 @@ void validate(const Triple &TT, const FeatureBitset &FeatureBits) {
 
 } // namespace RISCVFeatures
 
+namespace RISCVVPseudosTable {
+
+#define GET_RISCVVPseudosTable_IMPL
+#include "RISCVGenSearchableTables.inc"
+
+} // namespace RISCVVPseudosTable
+
 } // namespace llvm

diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
index db036e18a286..8cee6fc440e0 100644
--- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -18,6 +18,7 @@
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/MC/MCInstrDesc.h"
 #include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/MachineValueType.h"
 
 namespace llvm {
 
@@ -273,6 +274,111 @@ void validate(const Triple &TT, const FeatureBitset &FeatureBits);
 
 } // namespace RISCVFeatures
 
+namespace RISCVVMVTs {
+
+constexpr MVT vint8mf8_t = MVT::nxv1i8;
+constexpr MVT vint8mf4_t = MVT::nxv2i8;
+constexpr MVT vint8mf2_t = MVT::nxv4i8;
+constexpr MVT vint8m1_t = MVT::nxv8i8;
+constexpr MVT vint8m2_t = MVT::nxv16i8;
+constexpr MVT vint8m4_t = MVT::nxv32i8;
+constexpr MVT vint8m8_t = MVT::nxv64i8;
+
+constexpr MVT vint16mf4_t = MVT::nxv1i16;
+constexpr MVT vint16mf2_t = MVT::nxv2i16;
+constexpr MVT vint16m1_t = MVT::nxv4i16;
+constexpr MVT vint16m2_t = MVT::nxv8i16;
+constexpr MVT vint16m4_t = MVT::nxv16i16;
+constexpr MVT vint16m8_t = MVT::nxv32i16;
+
+constexpr MVT vint32mf2_t = MVT::nxv1i32;
+constexpr MVT vint32m1_t = MVT::nxv2i32;
+constexpr MVT vint32m2_t = MVT::nxv4i32;
+constexpr MVT vint32m4_t = MVT::nxv8i32;
+constexpr MVT vint32m8_t = MVT::nxv16i32;
+
+constexpr MVT vint64m1_t = MVT::nxv1i64;
+constexpr MVT vint64m2_t = MVT::nxv2i64;
+constexpr MVT vint64m4_t = MVT::nxv4i64;
+constexpr MVT vint64m8_t = MVT::nxv8i64;
+
+constexpr MVT vfloat16mf4_t = MVT::nxv1f16;
+constexpr MVT vfloat16mf2_t = MVT::nxv2f16;
+constexpr MVT vfloat16m1_t = MVT::nxv4f16;
+constexpr MVT vfloat16m2_t = MVT::nxv8f16;
+constexpr MVT vfloat16m4_t = MVT::nxv16f16;
+constexpr MVT vfloat16m8_t = MVT::nxv32f16;
+
+constexpr MVT vfloat32mf2_t = MVT::nxv1f32;
+constexpr MVT vfloat32m1_t = MVT::nxv2f32;
+constexpr MVT vfloat32m2_t = MVT::nxv4f32;
+constexpr MVT vfloat32m4_t = MVT::nxv8f32;
+constexpr MVT vfloat32m8_t = MVT::nxv16f32;
+
+constexpr MVT vfloat64m1_t = MVT::nxv1f64;
+constexpr MVT vfloat64m2_t = MVT::nxv2f64;
+constexpr MVT vfloat64m4_t = MVT::nxv4f64;
+constexpr MVT vfloat64m8_t = MVT::nxv8f64;
+
+constexpr MVT vbool1_t = MVT::nxv64i1;
+constexpr MVT vbool2_t = MVT::nxv32i1;
+constexpr MVT vbool4_t = MVT::nxv16i1;
+constexpr MVT vbool8_t = MVT::nxv8i1;
+constexpr MVT vbool16_t = MVT::nxv4i1;
+constexpr MVT vbool32_t = MVT::nxv2i1;
+constexpr MVT vbool64_t = MVT::nxv1i1;
+
+} // namespace RISCVVMVTs
+
+namespace RISCVVLengthMultiplier {
+
+enum LengthMultiplier {
+  LMul1 = 0,
+  LMul2 = 1,
+  LMul4 = 2,
+  LMul8 = 3,
+  LMulF8 = 5,
+  LMulF4 = 6,
+  LMulF2 = 7
+};
+
+}
+
+namespace RISCVVStandardElementWidth {
+
+enum StandardElementWidth {
+  ElementWidth8 = 0,
+  ElementWidth16 = 1,
+  ElementWidth32 = 2,
+  ElementWidth64 = 3
+};
+
+}
+
+namespace RISCVVPseudosTable {
+
+struct PseudoInfo {
+  unsigned int Pseudo;
+  unsigned int BaseInstr;
+  uint8_t VLIndex;
+  uint8_t SEWIndex;
+  uint8_t MergeOpIndex;
+  uint8_t VLMul;
+
+  int getVLIndex() const { return static_cast<int8_t>(VLIndex); }
+
+  int getSEWIndex() const { return static_cast<int8_t>(SEWIndex); }
+
+  int getMergeOpIndex() const { return static_cast<int8_t>(MergeOpIndex); }
+};
+
+using namespace RISCV;
+
+#define GET_RISCVVPseudosTable_DECL
+#include "RISCVGenSearchableTables.inc"
+
+} // end namespace RISCVVPseudosTable
+
 } // namespace llvm
 
 #endif

diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
new file mode 100644
index 000000000000..bb7228871fc9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
@@ -0,0 +1,62 @@
+# RUN: llc -mtriple riscv64 -mattr=+experimental-v %s  \
+# RUN:     -start-before=finalize-isel -stop-after=finalize-isel -o - \
+# RUN:     | FileCheck --check-prefix=POST-INSERTER %s
+
+# RUN: llc -mtriple riscv64 -mattr=+experimental-v %s  \
+# RUN:     -start-before=finalize-isel -o - \
+# RUN:     | FileCheck --check-prefix=CODEGEN %s
+
+--- |
+  define void @vadd_vint64m1(
+            <vscale x 1 x i64> *%pc,
+            <vscale x 1 x i64> *%pa,
+            <vscale x 1 x i64> *%pb,
+            i64 %vl)
+  {
+    ret void
+  }
+...
+---
+name: vadd_vint64m1
+tracksRegLiveness: true
+body:             |
+  bb.0 (%ir-block.0):
+    liveins: $x10, $x11, $x12, $x13
+
+    %3:gpr = COPY $x13
+    %2:gpr = COPY $x12
+    %1:gpr = COPY $x11
+    %0:gpr = COPY $x10
+    %5:vr = IMPLICIT_DEF
+    %4:vr = PseudoVLE64_V_M1 %5, %1, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+    %7:vr = IMPLICIT_DEF
+    %6:vr = PseudoVLE64_V_M1 %7, %2, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+    %9:vr = IMPLICIT_DEF
+    %8:vr = PseudoVADD_VV_M1 %9, killed %4, killed %6, $noreg, %3, 64, implicit $vl, implicit $vtype
+    PseudoVSE64_V_M1 killed %8, %0, $noreg, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+    PseudoRET
+
+...
+
+# POST-INSERTER: %0:gpr = COPY $x13
+# POST-INSERTER: %4:vr = IMPLICIT_DEF
+# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %4, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+# POST-INSERTER: %6:vr = IMPLICIT_DEF
+# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %7:vr = PseudoVLE64_V_M1 %6, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+# POST-INSERTER: %8:vr = IMPLICIT_DEF
+# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %9:vr = PseudoVADD_VV_M1 %8, killed %5, killed %7, $noreg, $noreg, -1, implicit $vl, implicit $vtype
+# POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: PseudoVSE64_V_M1 killed %9, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+
+# CODEGEN: vsetvli	a4, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vle64.v	v25, (a1)
+# CODEGEN-NEXT: vsetvli	a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vle64.v	v26, (a2)
+# CODEGEN-NEXT: vsetvli	a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vadd.vv	v25, v25, v26
+# CODEGEN-NEXT: vsetvli	a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vse64.v	v25, (a0)
+# CODEGEN-NEXT: ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
new file mode 100644
index 000000000000..1ac50da0858c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
@@ -0,0 +1,41 @@
+; This test shows the evolution of RVV pseudo instructions within isel.
+
+; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o %t.pre.mir \
+; RUN:     -stop-before=finalize-isel
+; RUN: cat %t.pre.mir | FileCheck --check-prefix=PRE-INSERTER %s
+
+; RUN: llc -mtriple riscv64 -mattr=+experimental-v %t.pre.mir -o %t.post.mir \
+; RUN:     -start-before=finalize-isel -stop-after=finalize-isel
+; RUN: cat %t.post.mir | FileCheck --check-prefix=POST-INSERTER %s
+
+define void @vadd_vint64m1(
+          <vscale x 1 x i64> *%pc,
+          <vscale x 1 x i64> *%pa,
+          <vscale x 1 x i64> *%pb)
+{
+  %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
+  %vb = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pb
+  %vc = add <vscale x 1 x i64> %va, %vb
+  store <vscale x 1 x i64> %vc, <vscale x 1 x i64> *%pc
+  ret void
+}
+
+; PRE-INSERTER: %4:vr = IMPLICIT_DEF
+; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+; PRE-INSERTER: %6:vr = IMPLICIT_DEF
+; PRE-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+; PRE-INSERTER: %8:vr = IMPLICIT_DEF
+; PRE-INSERTER: %7:vr = PseudoVADD_VV_M1 %8, killed %3, killed %5, $noreg, $x0, 64, implicit $vl, implicit $vtype
+; PRE-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+
+; POST-INSERTER: %4:vr = IMPLICIT_DEF
+; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+; POST-INSERTER: %6:vr = IMPLICIT_DEF
+; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+; POST-INSERTER: %8:vr = IMPLICIT_DEF
+; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 %8, killed %3, killed %5, $noreg, $noreg, -1, implicit $vl, implicit $vtype
+; POST-INSERTER: dead %12:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
new file mode 100644
index 000000000000..68b0b4e18530
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
+; RUN:     -verify-machineinstrs | FileCheck %s
+; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
+; RUN:     -verify-machineinstrs | FileCheck %s
+
+define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vscale x 4 x i16> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vle16.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vle16.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vse16.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
+  %vb = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pb
+  %vc = add <vscale x 4 x i16> %va, %vb
+  store <vscale x 4 x i16> %vc, <vscale x 4 x i16> *%pc
+  ret void
+}
+
+define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vscale x 8 x i16> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vle16.v v26, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vle16.v v28, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vse16.v v26, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pa
+  %vb = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pb
+  %vc = add <vscale x 8 x i16> %va, %vb
+  store <vscale x 8 x i16> %vc, <vscale x 8 x i16> *%pc
+  ret void
+}
+
+define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <vscale x 16 x i16> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vle16.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vle16.v v8, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vadd.vv v28, v28, v8
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vse16.v v28, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pa
+  %vb = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pb
+  %vc = add <vscale x 16 x i16> %va, %vb
+  store <vscale x 16 x i16> %vc, <vscale x 16 x i16> *%pc
+  ret void
+}
+
+define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <vscale x 32 x i16> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vle16.v v16, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pa
+  %vb = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pb
+  %vc = add <vscale x 32 x i16> %va, %vb
+  store <vscale x 32 x i16> %vc, <vscale x 32 x i16> *%pc
+  ret void
+}
+
+define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <vscale x 2 x i16> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vle16.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vle16.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vse16.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pa
+  %vb = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pb
+  %vc = add <vscale x 2 x i16> %va, %vb
+  store <vscale x 2 x i16> %vc, <vscale x 2 x i16> *%pc
+  ret void
+}
+
+define void @vadd_vint16mf4(<vscale x 1 x i16> *%pc, <vscale x 1 x i16> *%pa, <vscale x 1 x i16> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vle16.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vle16.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vse16.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pa
+  %vb = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pb
+  %vc = add <vscale x 1 x i16> %va, %vb
+  store <vscale x 1 x i16> %vc, <vscale x 1 x i16> *%pc
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
new file mode 100644
index 000000000000..bac63747fb1e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
@@ -0,0 +1,100 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
+; RUN:     -verify-machineinstrs | FileCheck %s
+; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
+; RUN:     -verify-machineinstrs | FileCheck %s
+
+define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vscale x 2 x i32> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vle32.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vle32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
+  %vb = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pb
+  %vc = add <vscale x 2 x i32> %va, %vb
+  store <vscale x 2 x i32> %vc, <vscale x 2 x i32> *%pc
+  ret void
+}
+
+define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vscale x 4 x i32> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vle32.v v26, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vle32.v v28, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
+  %vb = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pb
+  %vc = add <vscale x 4 x i32> %va, %vb
+  store <vscale x 4 x i32> %vc, <vscale x 4 x i32> *%pc
+  ret void
+}
+
+define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vscale x 8 x i32> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vle32.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vle32.v v8, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vadd.vv v28, v28, v8
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vse32.v v28, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
+  %vb = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pb
+  %vc = add <vscale x 8 x i32> %va, %vb
+  store <vscale x 8 x i32> %vc, <vscale x 8 x i32> *%pc
+  ret void
+}
+
+define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <vscale x 16 x i32> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vle32.v v16, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
+  %vb = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pb
+  %vc = add <vscale x 16 x i32> %va, %vb
+  store <vscale x 16 x i32> %vc, <vscale x 16 x i32> *%pc
+  ret void
+}
+
+define void @vadd_vint32mf2(<vscale x 1 x i32> *%pc, <vscale x 1 x i32> *%pa, <vscale x 1 x i32> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vle32.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vle32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa
+  %vb = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pb
+  %vc = add <vscale x 1 x i32> %va, %vb
+  store <vscale x 1 x i32> %vc, <vscale x 1 x i32> *%pc
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
new file mode 100644
index 000000000000..55eb4937cf36
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
+; RUN:     -verify-machineinstrs | FileCheck %s
+; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
+; RUN:     -verify-machineinstrs | FileCheck %s
+
+define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vscale x 1 x i64> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vle64.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vle64.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vse64.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
+  %vb = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pb
+  %vc = add <vscale x 1 x i64> %va, %vb
+  store <vscale x 1 x i64> %vc, <vscale x 1 x i64> *%pc
+  ret void
+}
+
+define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vscale x 2 x i64> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vle64.v v26, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vle64.v v28, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vse64.v v26, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
+  %vb = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pb
+  %vc = add <vscale x 2 x i64> %va, %vb
+  store <vscale x 2 x i64> %vc, <vscale x 2 x i64> *%pc
+  ret void
+}
+
+define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vscale x 4 x i64> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vle64.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vle64.v v8, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vadd.vv v28, v28, v8
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vse64.v v28, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
+  %vb = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pb
+  %vc = add <vscale x 4 x i64> %va, %vb
+  store <vscale x 4 x i64> %vc, <vscale x 4 x i64> *%pc
+  ret void
+}
+
+define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vscale x 8 x i64> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vle64.v v16, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa
+  %vb = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pb
+  %vc = add <vscale x 8 x i64> %va, %vb
+  store <vscale x 8 x i64> %vc, <vscale x 8 x i64> *%pc
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
new file mode 100644
index 000000000000..5764da47093c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32 -mattr=+experimental-v %s -o - \
+; RUN:     -verify-machineinstrs | FileCheck %s
+; RUN: llc -mtriple riscv64 -mattr=+experimental-v %s -o - \
+; RUN:     -verify-machineinstrs | FileCheck %s
+
+define void @vadd_vint8m1(<vscale x 8 x i8> *%pc, <vscale x 8 x i8> *%pa, <vscale x 8 x i8> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vle8.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pa
+  %vb = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pb
+  %vc = add <vscale x 8 x i8> %va, %vb
+  store <vscale x 8 x i8> %vc, <vscale x 8 x i8> *%pc
+  ret void
+}
+
+define void @vadd_vint8m2(<vscale x 16 x i8> *%pc, <vscale x 16 x i8> *%pa, <vscale x 16 x i8> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vle8.v v26, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vle8.v v28, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vse8.v v26, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pa
+  %vb = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pb
+  %vc = add <vscale x 16 x i8> %va, %vb
+  store <vscale x 16 x i8> %vc, <vscale x 16 x i8> *%pc
+  ret void
+}
+
+define void @vadd_vint8m4(<vscale x 32 x i8> *%pc, <vscale x 32 x i8> *%pa, <vscale x 32 x i8> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vle8.v v28, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vle8.v v8, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vadd.vv v28, v28, v8
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vse8.v v28, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pa
+  %vb = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pb
+  %vc = add <vscale x 32 x i8> %va, %vb
+  store <vscale x 32 x i8> %vc, <vscale x 32 x i8> *%pc
+  ret void
+}
+
+define void @vadd_vint8m8(<vscale x 64 x i8> *%pc, <vscale x 64 x i8> *%pa, <vscale x 64 x i8> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vle8.v v16, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pa
+  %vb = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pb
+  %vc = add <vscale x 64 x i8> %va, %vb
+  store <vscale x 64 x i8> %vc, <vscale x 64 x i8> *%pc
+  ret void
+}
+
+define void @vadd_vint8mf2(<vscale x 4 x i8> *%pc, <vscale x 4 x i8> *%pa, <vscale x 4 x i8> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vle8.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pa
+  %vb = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pb
+  %vc = add <vscale x 4 x i8> %va, %vb
+  store <vscale x 4 x i8> %vc, <vscale x 4 x i8> *%pc
+  ret void
+}
+
+define void @vadd_vint8mf4(<vscale x 2 x i8> *%pc, <vscale x 2 x i8> *%pa, <vscale x 2 x i8> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vle8.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pa
+  %vb = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pb
+  %vc = add <vscale x 2 x i8> %va, %vb
+  store <vscale x 2 x i8> %vc, <vscale x 2 x i8> *%pc
+  ret void
+}
+
+define void @vadd_vint8mf8(<vscale x 1 x i8> *%pc, <vscale x 1 x i8> *%pa, <vscale x 1 x i8> *%pb) nounwind {
+; CHECK-LABEL: vadd_vint8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vle8.v v26, (a2)
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vse8.v v25, (a0)
+; CHECK-NEXT:    ret
+  %va = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pa
+  %vb = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pb
+  %vc = add <vscale x 1 x i8> %va, %vb
+  store <vscale x 1 x i8> %vc, <vscale x 1 x i8> *%pc
+  ret void
+}

diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index f81876bc4391..3a9d298ff478 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -187,12 +187,16 @@ class InstructionMatcher;
 static Optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
   MVT VT(SVT);
 
-  if (VT.isVector() && VT.getVectorNumElements() != 1)
+  if (VT.isScalableVector())
+    return None;
+
+  if (VT.isFixedLengthVector() && VT.getVectorNumElements() != 1)
     return LLTCodeGen(
         LLT::vector(VT.getVectorNumElements(), VT.getScalarSizeInBits()));
 
   if (VT.isInteger() || VT.isFloatingPoint())
     return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));
+
   return None;
 }
 
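With the change above, MVTToLLT declines to produce an LLT for scalable vector types, so the TableGen GlobalISel importer rejects patterns involving the new RVV types instead of mistaking them for fixed-length vectors. As a stand-alone illustration (not part of this commit; it only assumes an LLVM tree of this vintage, where MachineValueType.h still lives under llvm/Support), the snippet below shows the two MVT predicates the new guard keys off:

  // Illustrative only -- not from the patch.
  #include "llvm/Support/MachineValueType.h"
  #include <cstdio>

  int main() {
    llvm::MVT Fixed = llvm::MVT::v4i32;      // fixed-length: 4 x i32
    llvm::MVT Scalable = llvm::MVT::nxv1i32; // scalable: vscale x 1 x i32

    // MVTToLLT now returns None whenever isScalableVector() is true and
    // only builds an LLT::vector for fixed-length vector types.
    std::printf("v4i32:   fixed=%d scalable=%d\n",
                Fixed.isFixedLengthVector(), Fixed.isScalableVector());
    std::printf("nxv1i32: fixed=%d scalable=%d\n",
                Scalable.isFixedLengthVector(), Scalable.isScalableVector());
    return 0;
  }

Returning None here, rather than asserting, lets the importer skip such patterns gracefully, which is all that is needed while LLT has no way to describe a scalable vector.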