[llvm] [RISCV][GISEL] Legalize G_INSERT and G_EXTRACT for scalable vectors (PR #108220)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 11 06:13:22 PDT 2024


https://github.com/michaelmaitland created https://github.com/llvm/llvm-project/pull/108220

This PR legalizes G_INSERT and G_EXTRACT for scalable vectors. It is heavily based on the corresponding SelectionDAG lowering code.

>From 35e9fd62d8ba2c7ab25fa49190714a3d2793910a Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 6 Mar 2024 09:50:24 -0800
Subject: [PATCH 1/2] [RISCV][GISEL] Add opcodes needed to legalize G_INSERT
 and G_EXTRACT

---
 llvm/lib/Target/RISCV/RISCVInstrGISel.td | 76 ++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
index ba40662c49c1df..4150f2b2c93777 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
@@ -41,6 +41,82 @@ def G_VMCLR_VL : RISCVGenericInstruction {
 }
 def : GINodeEquiv<G_VMCLR_VL, riscv_vmclr_vl>;
 
+// Pseudo equivalent to a RISCVISD::VFMV_S_F_VL
+def G_VFMV_S_F_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VFMV_S_F_VL, riscv_vfmv_s_f_vl>;
+
+// Pseudo equivalent to a RISCVISD::VFMV_V_F_VL
+def G_VFMV_V_F_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VFMV_V_F_VL, riscv_vfmv_v_f_vl>;
+
+// Pseudo equivalent to a RISCVISD::VMV_S_X_VL
+def G_VMV_S_X_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMV_S_X_VL, riscv_vmv_s_x_vl>;
+
+// Pseudo equivalent to a RISCVISD::VMV_V_X_VL
+def G_VMV_V_X_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMV_V_X_VL, riscv_vmv_v_x_vl>;
+
+// Pseudo equivalent to a RISCVISD::VMV_V_V_VL
+def G_VMV_V_V_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$passthru, type0:$vec, type1:$vl);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMV_V_V_VL, riscv_vmv_v_v_vl>;
+
+// This instruction is a vector move where the element type can be either an
+// integer or a floating point, but the register bank information is not yet
+// available to decide between G_VMV_V_X_VL and G_VFMV_V_F_VL.
+def G_VEC_MOVE_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+  let hasSideEffects = false;
+}
+
+// This instruction is a scalar move where the element type can be either an
+// integer or a floating point, but the register bank information is not yet
+// available to decide between G_VMV_S_X_VL and G_VFMV_S_F_VL.
+def G_SCALAR_MOVE_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+  let hasSideEffects = false;
+}
+
+// Pseudo equivalent to a RISCVISD::VSLIDEUP_VL
+def G_VSLIDEUP_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$merge, type0:$vec, type1:$idx, type2:$mask,
+                       type3:$vl, type4:$policy);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VSLIDEUP_VL, riscv_slideup_vl>;
+
+// Pseudo equivalent to a RISCVISD::VSLIDEDOWN_VL
+def G_VSLIDEDOWN_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type0:$merge, type0:$vec, type1:$idx, type2:$mask,
+                       type3:$vl, type4:$policy);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VSLIDEDOWN_VL, riscv_slidedown_vl>;
+
 // Pseudo equivalent to a RISCVISD::VMSET_VL
 def G_VMSET_VL : RISCVGenericInstruction {
   let OutOperandList = (outs type0:$dst);

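These pseudos are consumed by MachineIRBuilder call sites in the second patch.
As a minimal sketch (the variable names here are placeholders, not part of the
patch), building the slideup pseudo follows the InOperandList declared above:

  // Operands follow G_VSLIDEUP_VL's InOperandList:
  // merge, vec, idx, mask, vl, policy.
  auto Slideup = MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {DstTy},
                                {Merge, Vec, SlideAmt, Mask, VL, Policy});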
>From bc111494680fe73db38174e93c16012270d6b9ec Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 5 Mar 2024 11:09:15 -0800
Subject: [PATCH 2/2] [RISCV][GISEL] Legalize G_INSERT and G_EXTRACT for
 scalable vectors

---
 .../CodeGen/GlobalISel/MachineIRBuilder.cpp   |   8 +-
 llvm/lib/CodeGen/MachineVerifier.cpp          |  16 +-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 284 ++++++++++++++++++
 .../Target/RISCV/GISel/RISCVLegalizerInfo.h   |   2 +
 .../legalizer/rvv/legalize-extract.mir        | 278 +++++++++++++++++
 .../legalizer/rvv/legalize-insert.mir         | 278 +++++++++++++++++
 6 files changed, 856 insertions(+), 10 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert.mir

diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 925a1c7cf6aacc..96ca99a3871d8e 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -621,7 +621,8 @@ MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
 #ifndef NDEBUG
   assert(SrcTy.isValid() && "invalid operand type");
   assert(DstTy.isValid() && "invalid operand type");
-  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
+  assert(TypeSize::isKnownLE(DstTy.getSizeInBits().getWithIncrement(Index),
+                             SrcTy.getSizeInBits()) &&
          "extracting off end of register");
 #endif
 
@@ -797,8 +798,9 @@ MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                   const SrcOp &Src,
                                                   const SrcOp &Op,
                                                   unsigned Index) {
-  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
-             Res.getLLTTy(*getMRI()).getSizeInBits() &&
+  assert(TypeSize::isKnownLE(
+             Op.getLLTTy(*getMRI()).getSizeInBits().getWithIncrement(Index),
+             Res.getLLTTy(*getMRI()).getSizeInBits()) &&
          "insertion past the end of a register");
 
   if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
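The switch from plain unsigned arithmetic to TypeSize in these assertions is
what makes them work for scalable vectors: a scalable size is a multiple of
vscale, so two sizes are only comparable when the relationship holds for every
possible vscale. A small sketch of the semantics being relied on, assuming the
current TypeSize API from llvm/Support/TypeSize.h:

  TypeSize Fixed = TypeSize::getFixed(128);    // 128 bits
  TypeSize Scal = TypeSize::getScalable(128);  // vscale x 128 bits
  // Known for every vscale >= 1: 128 <= vscale * 128.
  bool A = TypeSize::isKnownLE(Fixed, Scal);   // true
  // Not provable for every vscale, so conservatively false.
  bool B = TypeSize::isKnownLE(Scal, Fixed);   // false
  // getWithIncrement adds to the minimum value and keeps scalability:
  // vscale x 192 <= vscale x 128 does not hold.
  bool C = TypeSize::isKnownLE(Scal.getWithIncrement(64), Scal); // false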
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 759201ed9dadc7..606929bb594e93 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1587,12 +1587,13 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
       break;
     }
 
-    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
-    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
+    TypeSize DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
+    TypeSize SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
     if (SrcSize == DstSize)
       report("extract source must be larger than result", MI);
 
-    if (DstSize + OffsetOp.getImm() > SrcSize)
+    if (DstSize.getKnownMinValue() + OffsetOp.getImm() >
+        SrcSize.getKnownMinValue())
       report("extract reads past end of register", MI);
     break;
   }
@@ -1609,13 +1610,14 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
       break;
     }
 
-    unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
-    unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
+    TypeSize DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
+    TypeSize SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
 
-    if (DstSize <= SrcSize)
+    if (TypeSize::isKnownLE(DstSize, SrcSize))
       report("inserted size must be smaller than total register", MI);
 
-    if (SrcSize + OffsetOp.getImm() > DstSize)
+    if (SrcSize.getKnownMinValue() + OffsetOp.getImm() >
+        DstSize.getKnownMinValue())
       report("insert writes past end of register", MI);
 
     break;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 64e8ee76e83915..25686799d377ff 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -580,6 +580,16 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 
   SplatActions.clampScalar(1, sXLen, sXLen);
 
+  getActionDefinitionsBuilder(G_INSERT)
+      .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
+                    typeIsLegalBoolVec(1, BoolVecTys, ST)))
+      .customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+                    typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
+
+  getActionDefinitionsBuilder(G_EXTRACT)
+      .customIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
+      .customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
+
   getLegacyLegalizerInfo().computeTables();
 }
 
@@ -802,6 +812,14 @@ bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
   return true;
 }
 
+static LLT getLMUL1Ty(LLT VecTy) {
+  assert(VecTy.getElementType().getSizeInBits() <= 64 &&
+         "Unexpected vector LLT");
+  return LLT::scalable_vector(RISCV::RVVBitsPerBlock /
+                                  VecTy.getElementType().getSizeInBits(),
+                              VecTy.getElementType());
+}
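+// Worked example: RVVBitsPerBlock is 64 on RISC-V, so for <vscale x 8 x s32>
+// (32-bit elements) getLMUL1Ty returns <vscale x 2 x s32>, the LMUL=1 type
+// holding 64 / 32 = 2 elements per vector register block.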
+
 /// Return the type of the mask type suitable for masking the provided
 /// vector type.  This is simply an i1 element type vector of the same
 /// (possibly scalable) length.
@@ -858,6 +876,11 @@ buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
                                   Unmerge.getReg(1), VL, MIB, MRI);
 }
 
+static MachineInstrBuilder buildVLMax(LLT VLTy, LLT VecTy,
+                                      MachineIRBuilder &MIB) {
+  assert(VecTy.isScalableVector() && "Expected scalable vector");
+  // VLMAX for VecTy is vscale times its minimum element count. Build it as a
+  // G_VSCALE in the scalar VL type (XLen), not in the vector type itself, so
+  // the result can feed scalar VL arithmetic.
+  return MIB.buildVScale(VLTy, VecTy.getElementCount().getKnownMinValue());
+}
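+// E.g. buildVLMax(XLenTy, <vscale x 4 x s16>, MIB) emits a G_VSCALE of 4,
+// i.e. VLMAX = vscale * 4 for that vector type.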
+
 // Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
 // legal equivalently-sized i8 type, so we can use that as a go-between.
 // Splats of s1 types that have constant value can be legalized as VMSET_VL or
@@ -914,6 +937,263 @@ bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
   return true;
 }
 
+bool RISCVLegalizerInfo::legalizeInsert(MachineInstr &MI,
+                                        MachineIRBuilder &MIB) const {
+  assert(MI.getOpcode() == TargetOpcode::G_INSERT);
+
+  MachineRegisterInfo &MRI = *MIB.getMRI();
+
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src1 = MI.getOperand(1).getReg();
+  Register Src2 = MI.getOperand(2).getReg();
+  uint64_t Idx = MI.getOperand(3).getImm();
+
+  LLT BigTy = MRI.getType(Src1);
+  LLT LitTy = MRI.getType(Src2);
+  Register BigVec = Src1;
+  Register LitVec = Src2;
+
+  // We don't have the ability to slide mask vectors up indexed by their i1
+  // elements; the smallest we can do is i8. Often we are able to bitcast to
+  // equivalent i8 vectors. Otherwise, we must zero-extend to equivalent i8
+  // vectors and truncate down after the insert.
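+  // E.g. <vscale x 32 x s1> bitcasts to <vscale x 4 x s8>, whereas
+  // <vscale x 4 x s1> (fewer than 8 elements) must instead be zero-extended
+  // to <vscale x 4 x s8>.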
+  if (LitTy.getElementType() == LLT::scalar(1) &&
+      (Idx != 0 ||
+       MRI.getVRegDef(BigVec)->getOpcode() != TargetOpcode::G_IMPLICIT_DEF)) {
+    auto BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
+    auto LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
+    if (BigTyMinElts >= 8 && LitTyMinElts >= 8) {
+      assert(Idx % 8 == 0 && "Invalid index");
+      assert(BigTyMinElts % 8 == 0 && LitTyMinElts % 8 == 0 &&
+             "Unexpected mask vector lowering");
+      Idx /= 8;
+      BigTy = LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8);
+      LitTy = LLT::vector(LitTy.getElementCount().divideCoefficientBy(8), 8);
+      BigVec = MIB.buildBitcast(BigTy, BigVec).getReg(0);
+      LitVec = MIB.buildBitcast(LitTy, LitVec).getReg(0);
+    } else {
+      // We can't slide this mask vector up indexed by its i1 elements.
+      // This poses a problem when we wish to insert a scalable vector which
+      // can't be re-expressed as a larger type. Just choose the slow path and
+      // extend to a larger type, then truncate back down.
+      LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
+      LLT ExtLitTy = LitTy.changeElementType(LLT::scalar(8));
+      auto BigZExt = MIB.buildZExt(ExtBigTy, BigVec);
+      auto LitZExt = MIB.buildZExt(ExtLitTy, LitVec);
+      auto Insert = MIB.buildInsert(ExtBigTy, BigZExt, LitZExt, Idx);
+      auto SplatZero = MIB.buildConstant(ExtBigTy, 0);
+      MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, Insert, SplatZero);
+      MI.eraseFromParent();
+      return true;
+    }
+  }
+
+  const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
+  MVT LitTyMVT = getMVTForLLT(LitTy);
+  unsigned SubRegIdx, RemIdx;
+  std::tie(SubRegIdx, RemIdx) =
+      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+          getMVTForLLT(BigTy), LitTyMVT, Idx, TRI);
+
+  RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(LitTy));
+  bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
+                         SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
+                         SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
+
+  // If the Idx has been completely eliminated and this subvector's size is a
+  // vector register or a multiple thereof, or the surrounding elements are
+  // undef, then this is a subvector insert which naturally aligns to a vector
+  // register. These can easily be handled using subregister manipulation.
+  if (RemIdx == 0 && (!IsSubVecPartReg || MRI.getVRegDef(Src1)->getOpcode() ==
+                                              TargetOpcode::G_IMPLICIT_DEF))
+    return true;
+
+  // If the subvector is smaller than a vector register, then the insertion
+  // must preserve the undisturbed elements of the register. We do this by
+  // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
+  // (which resolves to a subregister copy), performing a VSLIDEUP to place the
+  // subvector within the vector register, and an INSERT_SUBVECTOR of that
+  // LMUL=1 type back into the larger vector (resolving to another subregister
+  // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
+  // to avoid allocating a large register group to hold our subvector.
+
+  // VSLIDEUP works by leaving elements 0 <= i < OFFSET undisturbed, setting
+  // elements OFFSET <= i < VL to the "subvector", and setting elements
+  // VL <= i < VLMAX according to the tail policy (in our case undisturbed).
+  // This means we can set up a subvector insertion where OFFSET is the
+  // insertion offset, and the VL is the OFFSET plus the size of the
+  // subvector.
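+  // For example, with OFFSET = vscale x 2 and a subvector of vscale x 2
+  // elements, VL = vscale x 4: elements [0, vscale x 2) stay undisturbed,
+  // elements [vscale x 2, vscale x 4) receive the subvector, and the tail
+  // keeps the undisturbed policy.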
+  const LLT XLenTy(STI.getXLenVT());
+  LLT InterLitTy = BigTy;
+  Register AlignedExtract = Src1;
+  unsigned AlignedIdx = Idx - RemIdx;
+  if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
+                          getLMUL1Ty(BigTy).getSizeInBits())) {
+    InterLitTy = getLMUL1Ty(BigTy);
+    // Extract a subvector equal to the nearest full vector register type. This
+    // should resolve to a G_EXTRACT on a subreg.
+    AlignedExtract = MIB.buildExtract(InterLitTy, BigVec, AlignedIdx).getReg(0);
+  }
+
+  auto Insert =
+      MIB.buildInsert(InterLitTy, MIB.buildUndef(InterLitTy), LitVec, 0);
+
+  auto [Mask, VL] = buildDefaultVLOps(BigTy, MIB, MRI);
+
+  ElementCount EndIndex =
+      ElementCount::getScalable(RemIdx) + LitTy.getElementCount();
+  VL = buildVLMax(XLenTy, LitTy, MIB).getReg(0);
+
+  // Use tail agnostic policy if we're inserting over InterLitTy's tail.
+  uint64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+  if (EndIndex == InterLitTy.getElementCount())
+    Policy = RISCVII::TAIL_AGNOSTIC;
+
+  // If we're inserting into the lowest elements, use a tail undisturbed
+  // vmv.v.v.
+  Register Inserted;
+  if (RemIdx == 0) {
+    Inserted = MIB.buildInstr(RISCV::G_VMV_V_V_VL, {InterLitTy},
+                              {AlignedExtract, Insert, VL})
+                   .getReg(0);
+  } else {
+    auto SlideupAmt = MIB.buildVScale(XLenTy, RemIdx);
+    // Construct the vector length corresponding to RemIdx + length(LitTy).
+    VL = MIB.buildAdd(XLenTy, SlideupAmt, VL).getReg(0);
+    Inserted =
+        MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InterLitTy},
+                       {AlignedExtract, LitVec, SlideupAmt, Mask, VL, Policy})
+            .getReg(0);
+  }
+
+  // If required, insert this subvector back into the correct vector register.
+  // This should resolve to an INSERT_SUBREG instruction.
+  if (TypeSize::isKnownGT(BigTy.getSizeInBits(), InterLitTy.getSizeInBits()))
+    Inserted = MIB.buildInsert(BigTy, BigVec, Inserted, AlignedIdx).getReg(0);
+
+  // We might have bitcast from a mask type: cast back to the original type if
+  // required.
+  MIB.buildBitcast(Dst, Inserted);
+
+  MI.eraseFromParent();
+  return true;
+}
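+// Illustrative example for legalizeInsert (not part of the lowering itself):
+// a G_INSERT of <vscale x 2 x s1> into <vscale x 4 x s1> at index 2 has too
+// few i1 elements to bitcast to i8 elements, so it takes the zero-extend +
+// G_INSERT + compare-to-zero path above. Such an operation could be built
+// with, e.g., MIB.buildInsert(BigTy, BigVec, LitVec, /*Index=*/2).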
+
+bool RISCVLegalizerInfo::legalizeExtract(MachineInstr &MI,
+                                         MachineIRBuilder &MIB) const {
+  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);
+
+  MachineRegisterInfo &MRI = *MIB.getMRI();
+
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src = MI.getOperand(1).getReg();
+  uint64_t Idx = MI.getOperand(2).getImm();
+
+  // Only support vectors using custom legalization
+  LLT LitTy = MRI.getType(Dst);
+  if (LitTy.isScalar())
+    return false;
+
+  LLT BigTy = MRI.getType(Src);
+  Register Vec = Src;
+
+  // We don't have the ability to slide mask vectors down indexed by their i1
+  // elements; the smallest we can do is i8. Often we are able to bitcast to
+  // equivalent i8 vectors.
+  if (LitTy.getElementType() == LLT::scalar(1) && Idx != 0) {
+    auto BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
+    auto LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
+    if (BigTyMinElts >= 8 && LitTyMinElts >= 8) {
+      assert(Idx % 8 == 0 && "Invalid index");
+      assert(BigTyMinElts % 8 == 0 && LitTyMinElts % 8 == 0 &&
+             "Unexpected mask vector lowering");
+      Idx /= 8;
+      BigTy = LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8);
+      LitTy = LLT::vector(LitTy.getElementCount().divideCoefficientBy(8), 8);
+      Vec = MIB.buildBitcast(BigTy, Vec).getReg(0);
+    } else {
+      // We can't slide this mask vector down indexed by its i1 elements.
+      // This poses a problem when we wish to extract a scalable vector which
+      // can't be re-expressed as a larger type. Just choose the slow path and
+      // extend to a larger type, then truncate back down.
+      LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
+      LLT ExtLitTy = LitTy.changeElementType(LLT::scalar(8));
+      auto BigZExt = MIB.buildZExt(ExtBigTy, Vec);
+      auto ExtractZExt = MIB.buildExtract(ExtLitTy, BigZExt, Idx);
+      auto SplatZero = MIB.buildSplatVector(
+          ExtLitTy, MIB.buildConstant(ExtLitTy.getElementType(), 0));
+      MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, ExtractZExt, SplatZero);
+      MI.eraseFromParent();
+      return true;
+    }
+  }
+
+  // With an index of 0 this is a cast-like subvector, which can be performed
+  // with subregister operations.
+  if (Idx == 0)
+    return true;
+
+  // extract_subvector scales the index by vscale if the subvector is scalable,
+  // and decomposeSubvectorInsertExtractToSubRegs takes this into account.
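+  // E.g. extracting <vscale x 2 x s1> from <vscale x 4 x s1> at index 2
+  // extracts the elements starting at runtime offset vscale * 2, i.e. the
+  // upper half of the source register group.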
+  const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
+  MVT LitTyMVT = getMVTForLLT(LitTy);
+  unsigned SubRegIdx;
+  ElementCount RemIdx;
+  auto Decompose =
+      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+          getMVTForLLT(BigTy), LitTyMVT, Idx, TRI);
+  SubRegIdx = Decompose.first;
+  RemIdx = ElementCount::getScalable(Decompose.second);
+
+  // If the Idx has been completely eliminated then this is a subvector extract
+  // which naturally aligns to a vector register. These can easily be handled
+  // using subregister manipulation.
+  // TODO: add tests
+  if (RemIdx.isZero()) {
+    assert(false);
+    return true;
+  }
+
+  // Else LitTy is M1 or smaller and may need to be slid down: if LitTy
+  // was > M1 then the index would need to be a multiple of VLMAX, and so would
+  // divide exactly.
+  assert(
+      RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(LitTyMVT)).second ||
+      RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVII::VLMUL::LMUL_1);
+
+  // If the vector type is an LMUL-group type, extract a subvector equal to the
+  // nearest full vector register type.
+  LLT InterLitTy = BigTy;
+  if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
+                          getLMUL1Ty(BigTy).getSizeInBits())) {
+    // If BigTy has an LMUL > 1, then LitTy should have a smaller LMUL, and
+    // we should have successfully decomposed the extract into a subregister.
+    assert(SubRegIdx != RISCV::NoSubRegister);
+    InterLitTy = getLMUL1Ty(BigTy);
+    // TODO: need to make this a G_EXTRACT_SUBREG?
+    Vec = MIB.buildExtract(InterLitTy, Vec, SubRegIdx).getReg(0);
+  }
+
+  // Slide this vector register down by the desired number of elements in order
+  // to place the desired subvector starting at element 0.
+  const LLT XLenTy(STI.getXLenVT());
+  auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx.getKnownMinValue());
+  auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI);
+  uint64_t Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+  auto Slidedown = MIB.buildInstr(
+      RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
+      {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});
+
+  // Now the vector is in the right position, extract our final subvector. This
+  // should resolve to a COPY.
+  auto Extract = MIB.buildExtract(LitTy, Slidedown, 0);
+
+  // We might have bitcast from a mask type: cast back to the original type if
+  // required.
+  MIB.buildBitcast(Dst, Extract);
+
+  MI.eraseFromParent();
+  return true;
+}
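+// Illustrative example for legalizeExtract (mirrors the first test in
+// legalize-extract.mir below): extracting <vscale x 2 x s1> from
+// <vscale x 4 x s1> at index 2 zero-extends to i8 elements, slides the
+// source down by vscale * 2, extracts at offset 0, and compares the result
+// back against zero. Such an operation could be built with, e.g.,
+// MIB.buildExtract(LitTy, SrcVec, /*Index=*/2).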
+
 bool RISCVLegalizerInfo::legalizeCustom(
     LegalizerHelper &Helper, MachineInstr &MI,
     LostDebugLocObserver &LocObserver) const {
@@ -987,6 +1267,10 @@ bool RISCVLegalizerInfo::legalizeCustom(
   case TargetOpcode::G_LOAD:
   case TargetOpcode::G_STORE:
     return legalizeLoadStore(MI, Helper, MIRBuilder);
+  case TargetOpcode::G_INSERT:
+    return legalizeInsert(MI, MIRBuilder);
+  case TargetOpcode::G_EXTRACT:
+    return legalizeExtract(MI, MIRBuilder);
   }
 
   llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 2fc28615e7630d..c71a9158fe2f4a 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -48,6 +48,8 @@ class RISCVLegalizerInfo : public LegalizerInfo {
   bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
   bool legalizeLoadStore(MachineInstr &MI, LegalizerHelper &Helper,
                          MachineIRBuilder &MIB) const;
+  bool legalizeInsert(MachineInstr &MI, MachineIRBuilder &MIB) const;
+  bool legalizeExtract(MachineInstr &MI, MachineIRBuilder &MIB) const;
 };
 } // end namespace llvm
 #endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract.mir
new file mode 100644
index 00000000000000..c5f503f34eaa48
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract.mir
@@ -0,0 +1,278 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+# Special handling for i1-element vectors with non-zero index
+---
+name:            extract_nxv2i1_nxv4i1
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i1_nxv4i1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+    ; CHECK-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s8>) = G_BITCAST [[EXTRACT]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[BITCAST]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s1>) = G_EXTRACT %0(<vscale x 4 x s1>), 2
+    $v8 = COPY %1(<vscale x 2 x s1>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv4i1_nxv8i1
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv4i1_nxv8i1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+    ; CHECK-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), $x0, 3
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s8>) = G_BITCAST [[EXTRACT]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[BITCAST]](<vscale x 4 x s8>), [[SPLAT_VECTOR2]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 4 x s1>) = G_EXTRACT %0(<vscale x 8 x s1>), 2
+    $v8 = COPY %1(<vscale x 4 x s1>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv32i1_nxv64i1
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv32i1_nxv64i1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[DEF]](<vscale x 64 x s1>)
+    ; CHECK-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), $x0, 3
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
+    ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 32 x s1>) = G_EXTRACT %0(<vscale x 64 x s1>), 16
+    $v8 = COPY %1(<vscale x 32 x s1>)
+    PseudoRET implicit $v8
+...
+
+# i1-element vectors with zero index
+---
+name:            extract_nxv2i1_nxv4i1_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i1_nxv4i1_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s1>) = G_EXTRACT [[DEF]](<vscale x 4 x s1>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s1>) = G_EXTRACT %0(<vscale x 4 x s1>), 0
+    $v8 = COPY %1(<vscale x 2 x s1>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv4i1_nxv8i1_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv4i1_nxv8i1_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s1>) = G_EXTRACT [[DEF]](<vscale x 8 x s1>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 4 x s1>) = G_EXTRACT %0(<vscale x 8 x s1>), 0
+    $v8 = COPY %1(<vscale x 4 x s1>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv32i1_nxv64i1_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv32i1_nxv64i1_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 32 x s1>) = G_EXTRACT [[DEF]](<vscale x 64 x s1>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 32 x s1>) = G_EXTRACT %0(<vscale x 64 x s1>), 0
+    $v8 = COPY %1(<vscale x 32 x s1>)
+    PseudoRET implicit $v8
+...
+
+# Extract with zero index
+---
+name:            extract_nxv1i8_nxv2i8_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv1i8_nxv2i8_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT [[DEF]](<vscale x 2 x s8>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 1 x s8>) = G_EXTRACT %0(<vscale x 2 x s8>), 0
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv2i16_nxv4i16_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i16_nxv4i16_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_EXTRACT [[DEF]](<vscale x 4 x s16>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s16>) = G_EXTRACT %0(<vscale x 4 x s16>), 0
+    $v8 = COPY %1(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv4i32_nxv8i32_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv4i32_nxv8i32_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT [[DEF]](<vscale x 8 x s32>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 4 x s32>) = G_EXTRACT %0(<vscale x 8 x s32>), 0
+    $v8 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv2i64_nxv8i64_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i64_nxv8i64_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_EXTRACT [[DEF]](<vscale x 8 x s64>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s64>) = G_EXTRACT %0(<vscale x 8 x s64>), 0
+    $v8 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8
+...
+
+# Extract with non-zero index
+---
+name:            extract_nxv1i8_nxv2i8
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv1i8_nxv2i8
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT [[DEF]](<vscale x 2 x s8>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 1 x s8>) = G_EXTRACT %0(<vscale x 2 x s8>), 0
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv2i16_nxv4i16
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i16_nxv4i16
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_EXTRACT [[DEF]](<vscale x 4 x s16>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s16>) = G_EXTRACT %0(<vscale x 4 x s16>), 0
+    $v8 = COPY %1(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv4i32_nxv8i32
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv4i32_nxv8i32
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT [[DEF]](<vscale x 8 x s32>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 4 x s32>) = G_EXTRACT %0(<vscale x 8 x s32>), 0
+    $v8 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv2i64_nxv8i64
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i64_nxv8i64
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_EXTRACT [[DEF]](<vscale x 8 x s64>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s64>) = G_EXTRACT %0(<vscale x 8 x s64>), 0
+    $v8 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert.mir
new file mode 100644
index 00000000000000..c5f503f34eaa48
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insert.mir
@@ -0,0 +1,278 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+# Special handling for i1-element vectors with non-zero index
+---
+name:            extract_nxv2i1_nxv4i1
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i1_nxv4i1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+    ; CHECK-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s8>) = G_BITCAST [[EXTRACT]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[BITCAST]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s1>) = G_EXTRACT %0(<vscale x 4 x s1>), 2
+    $v8 = COPY %1(<vscale x 2 x s1>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv4i1_nxv8i1
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv4i1_nxv8i1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+    ; CHECK-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), $x0, 3
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s8>) = G_BITCAST [[EXTRACT]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[BITCAST]](<vscale x 4 x s8>), [[SPLAT_VECTOR2]]
+    ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 4 x s1>) = G_EXTRACT %0(<vscale x 8 x s1>), 2
+    $v8 = COPY %1(<vscale x 4 x s1>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv32i1_nxv64i1
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv32i1_nxv64i1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[DEF]](<vscale x 64 x s1>)
+    ; CHECK-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), $x0, 3
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
+    ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 32 x s1>) = G_EXTRACT %0(<vscale x 64 x s1>), 16
+    $v8 = COPY %1(<vscale x 32 x s1>)
+    PseudoRET implicit $v8
+...
+
+# i1-element vectors with zero index
+---
+name:            extract_nxv2i1_nxv4i1_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i1_nxv4i1_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s1>) = G_EXTRACT [[DEF]](<vscale x 4 x s1>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s1>) = G_EXTRACT %0(<vscale x 4 x s1>), 0
+    $v8 = COPY %1(<vscale x 2 x s1>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv4i1_nxv8i1_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv4i1_nxv8i1_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s1>) = G_EXTRACT [[DEF]](<vscale x 8 x s1>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 4 x s1>) = G_EXTRACT %0(<vscale x 8 x s1>), 0
+    $v8 = COPY %1(<vscale x 4 x s1>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv32i1_nxv64i1_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv32i1_nxv64i1_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 32 x s1>) = G_EXTRACT [[DEF]](<vscale x 64 x s1>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 32 x s1>) = G_EXTRACT %0(<vscale x 64 x s1>), 0
+    $v8 = COPY %1(<vscale x 32 x s1>)
+    PseudoRET implicit $v8
+...
+
+# Extract with zero index
+---
+name:            extract_nxv1i8_nxv2i8_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv1i8_nxv2i8_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT [[DEF]](<vscale x 2 x s8>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 1 x s8>) = G_EXTRACT %0(<vscale x 2 x s8>), 0
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv2i16_nxv4i16_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i16_nxv4i16_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_EXTRACT [[DEF]](<vscale x 4 x s16>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s16>) = G_EXTRACT %0(<vscale x 4 x s16>), 0
+    $v8 = COPY %1(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv4i32_nxv8i32_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv4i32_nxv8i32_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT [[DEF]](<vscale x 8 x s32>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 4 x s32>) = G_EXTRACT %0(<vscale x 8 x s32>), 0
+    $v8 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv2i64_nxv8i64_zero
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i64_nxv8i64_zero
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_EXTRACT [[DEF]](<vscale x 8 x s64>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s64>) = G_EXTRACT %0(<vscale x 8 x s64>), 0
+    $v8 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8
+...
+
+# Extract with non-zero index
+---
+name:            extract_nxv1i8_nxv2i8
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv1i8_nxv2i8
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT [[DEF]](<vscale x 2 x s8>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 1 x s8>) = G_EXTRACT %0(<vscale x 2 x s8>), 0
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv2i16_nxv4i16
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i16_nxv4i16
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_EXTRACT [[DEF]](<vscale x 4 x s16>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s16>) = G_EXTRACT %0(<vscale x 4 x s16>), 0
+    $v8 = COPY %1(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv4i32_nxv8i32
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv4i32_nxv8i32
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT [[DEF]](<vscale x 8 x s32>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 4 x s32>) = G_EXTRACT %0(<vscale x 8 x s32>), 0
+    $v8 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8
+...
+---
+name:            extract_nxv2i64_nxv8i64
+legalized:       false
+tracksRegLiveness: true
+body:             |
+  bb.0.entry:
+    ; CHECK-LABEL: name: extract_nxv2i64_nxv8i64
+    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_EXTRACT [[DEF]](<vscale x 8 x s64>), 0
+    ; CHECK-NEXT: $v8 = COPY [[EXTRACT]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+    %1:_(<vscale x 2 x s64>) = G_EXTRACT %0(<vscale x 8 x s64>), 0
+    $v8 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8
+...


