[llvm] f957d08 - [RISCV][GISEL] Legalize G_EXTRACT_SUBVECTOR (#109426)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 1 11:08:53 PDT 2024
Author: Michael Maitland
Date: 2024-10-01T14:08:49-04:00
New Revision: f957d080e9e9702ea9519d795f549e2bc37e3067
URL: https://github.com/llvm/llvm-project/commit/f957d080e9e9702ea9519d795f549e2bc37e3067
DIFF: https://github.com/llvm/llvm-project/commit/f957d080e9e9702ea9519d795f549e2bc37e3067.diff
LOG: [RISCV][GISEL] Legalize G_EXTRACT_SUBVECTOR (#109426)
This is heavily based on the SelectionDAG lowerEXTRACT_SUBVECTOR code.
Added:
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
Modified:
llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
llvm/lib/Target/RISCV/RISCVInstrGISel.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
index 132b7ec9aeef7c..d9f3f4ab3935d3 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -800,6 +800,17 @@ class GInsertVectorElement : public GenericMachineInstr {
}
};
+/// Represents an extract subvector.
+class GExtractSubvector : public GenericMachineInstr {
+public:
+ Register getSrcVec() const { return getOperand(1).getReg(); }
+ uint64_t getIndexImm() const { return getOperand(2).getImm(); }
+
+ static bool classof(const MachineInstr *MI) {
+ return MI->getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR;
+ }
+};
+
/// Represents a freeze.
class GFreeze : public GenericMachineInstr {
public:
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index ecade6b5caed6e..ec8a2993883765 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -378,6 +378,8 @@ class LegalizerHelper {
LLT CastTy);
LegalizeResult bitcastConcatVector(MachineInstr &MI, unsigned TypeIdx,
LLT CastTy);
+ LegalizeResult bitcastExtractSubvector(MachineInstr &MI, unsigned TypeIdx,
+ LLT CastTy);
LegalizeResult lowerConstant(MachineInstr &MI);
LegalizeResult lowerFConstant(MachineInstr &MI);
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 2fb2d104f1ce34..3b2fd95076c465 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3666,6 +3666,65 @@ LegalizerHelper::bitcastConcatVector(MachineInstr &MI, unsigned TypeIdx,
return Legalized;
}
+/// This attempts to bitcast G_EXTRACT_SUBVECTOR to CastTy.
+///
+/// <vscale x 8 x i1> = G_EXTRACT_SUBVECTOR <vscale x 16 x i1>, N
+///
+/// ===>
+///
+/// <vscale x 2 x i8> = G_BITCAST <vscale x 16 x i1>
+/// <vscale x 1 x i8> = G_EXTRACT_SUBVECTOR <vscale x 2 x i8>, N / 8
+/// <vscale x 8 x i1> = G_BITCAST <vscale x 1 x i8>
+LegalizerHelper::LegalizeResult
+LegalizerHelper::bitcastExtractSubvector(MachineInstr &MI, unsigned TypeIdx,
+ LLT CastTy) {
+ auto ES = cast<GExtractSubvector>(&MI);
+
+ if (!CastTy.isVector())
+ return UnableToLegalize;
+
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
+ Register Dst = ES->getReg(0);
+ Register Src = ES->getSrcVec();
+ uint64_t Idx = ES->getIndexImm();
+
+ MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
+
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+ ElementCount DstTyEC = DstTy.getElementCount();
+ ElementCount SrcTyEC = SrcTy.getElementCount();
+ auto DstTyMinElts = DstTyEC.getKnownMinValue();
+ auto SrcTyMinElts = SrcTyEC.getKnownMinValue();
+
+ if (DstTy == CastTy)
+ return Legalized;
+
+ if (DstTy.getSizeInBits() != CastTy.getSizeInBits())
+ return UnableToLegalize;
+
+ unsigned CastEltSize = CastTy.getElementType().getSizeInBits();
+ unsigned DstEltSize = DstTy.getElementType().getSizeInBits();
+ if (CastEltSize < DstEltSize)
+ return UnableToLegalize;
+
+ auto AdjustAmt = CastEltSize / DstEltSize;
+ if (Idx % AdjustAmt != 0 || DstTyMinElts % AdjustAmt != 0 ||
+ SrcTyMinElts % AdjustAmt != 0)
+ return UnableToLegalize;
+
+ Idx /= AdjustAmt;
+ SrcTy = LLT::vector(SrcTyEC.divideCoefficientBy(AdjustAmt), AdjustAmt);
+ auto CastVec = MIRBuilder.buildBitcast(SrcTy, Src);
+ auto PromotedES = MIRBuilder.buildExtractSubvector(CastTy, CastVec, Idx);
+ MIRBuilder.buildBitcast(Dst, PromotedES);
+
+ ES->eraseFromParent();
+ return Legalized;
+}
+
LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
// Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
Register DstReg = LoadMI.getDstReg();
@@ -3972,6 +4031,8 @@ LegalizerHelper::bitcast(MachineInstr &MI, unsigned TypeIdx, LLT CastTy) {
return bitcastInsertVectorElt(MI, TypeIdx, CastTy);
case TargetOpcode::G_CONCAT_VECTORS:
return bitcastConcatVector(MI, TypeIdx, CastTy);
+ case TargetOpcode::G_EXTRACT_SUBVECTOR:
+ return bitcastExtractSubvector(MI, TypeIdx, CastTy);
default:
return UnableToLegalize;
}
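
To make the index arithmetic above concrete, here is a minimal standalone
sketch (not part of the patch; the type sizes are illustrative assumptions)
of what bitcastExtractSubvector computes for the example in its doc comment,
i.e. extracting <vscale x 8 x i1> from <vscale x 16 x i1> at index 8 with
CastTy = <vscale x 1 x i8>:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    uint64_t Idx = 8;           // element index into the i1 source
    unsigned DstEltSize = 1;    // i1 result elements
    unsigned CastEltSize = 8;   // i8 cast elements
    unsigned DstTyMinElts = 8;  // <vscale x 8 x i1>
    unsigned SrcTyMinElts = 16; // <vscale x 16 x i1>

    // Eight i1 elements pack into one i8 element.
    unsigned AdjustAmt = CastEltSize / DstEltSize;

    // The rewrite is only legal when the index and both element counts
    // divide evenly; otherwise the helper returns UnableToLegalize.
    assert(Idx % AdjustAmt == 0 && DstTyMinElts % AdjustAmt == 0 &&
           SrcTyMinElts % AdjustAmt == 0);

    Idx /= AdjustAmt; // index into the i8 vector
    printf("cast source: <vscale x %u x i8>, new index: %llu\n",
           SrcTyMinElts / AdjustAmt, (unsigned long long)Idx);
    // Prints: cast source: <vscale x 2 x i8>, new index: 1
    return 0;
  }
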
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 055193bcc2c8db..edb6a522f190d8 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -597,6 +597,31 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
SplatActions.clampScalar(1, sXLen, sXLen);
+ LegalityPredicate ExtractSubvecBitcastPred = [=](const LegalityQuery &Query) {
+ LLT DstTy = Query.Types[0];
+ LLT SrcTy = Query.Types[1];
+ return DstTy.getElementType() == LLT::scalar(1) &&
+ DstTy.getElementCount().getKnownMinValue() >= 8 &&
+ SrcTy.getElementCount().getKnownMinValue() >= 8;
+ };
+ getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
+ // We don't have the ability to slide mask vectors down indexed by their
+ // i1 elements; the smallest we can do is i8. Often we are able to bitcast
+ // to equivalent i8 vectors.
+ .bitcastIf(
+ all(typeIsLegalBoolVec(0, BoolVecTys, ST),
+ typeIsLegalBoolVec(1, BoolVecTys, ST), ExtractSubvecBitcastPred),
+ [=](const LegalityQuery &Query) {
+ LLT CastTy = LLT::vector(
+ Query.Types[0].getElementCount().divideCoefficientBy(8), 8);
+ return std::pair(0, CastTy);
+ })
+ .customIf(LegalityPredicates::any(
+ all(typeIsLegalBoolVec(0, BoolVecTys, ST),
+ typeIsLegalBoolVec(1, BoolVecTys, ST)),
+ all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST))));
+
getLegacyLegalizerInfo().computeTables();
}
@@ -931,6 +956,105 @@ bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
return true;
}
+static LLT getLMUL1Ty(LLT VecTy) {
+ assert(VecTy.getElementType().getSizeInBits() <= 64 &&
+ "Unexpected vector LLT");
+ return LLT::scalable_vector(RISCV::RVVBitsPerBlock /
+ VecTy.getElementType().getSizeInBits(),
+ VecTy.getElementType());
+}
+
+bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
+ LegalizerHelper &Helper,
+ MachineIRBuilder &MIB) const {
+ GExtractSubvector &ES = cast<GExtractSubvector>(MI);
+
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+
+ Register Dst = ES.getReg(0);
+ Register Src = ES.getSrcVec();
+ uint64_t Idx = ES.getIndexImm();
+
+ // With an index of 0 this is a cast-like subvector, which can be performed
+ // with subregister operations.
+ if (Idx == 0)
+ return true;
+
+ LLT LitTy = MRI.getType(Dst);
+ LLT BigTy = MRI.getType(Src);
+
+ if (LitTy.getElementType() == LLT::scalar(1)) {
+ // We can't slide this mask vector down indexed by its i1 elements.
+ // This poses a problem when we wish to extract a scalable vector which
+ // can't be re-expressed as a larger type. Just choose the slow path and
+ // extend to a larger type, then truncate back down.
+ LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
+ LLT ExtLitTy = LitTy.changeElementType(LLT::scalar(8));
+ auto BigZExt = MIB.buildZExt(ExtBigTy, Src);
+ auto ExtractZExt = MIB.buildExtractSubvector(ExtLitTy, BigZExt, Idx);
+ auto SplatZero = MIB.buildSplatVector(
+ ExtLitTy, MIB.buildConstant(ExtLitTy.getElementType(), 0));
+ MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, ExtractZExt, SplatZero);
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // extract_subvector scales the index by vscale if the subvector is scalable,
+ // and decomposeSubvectorInsertExtractToSubRegs takes this into account.
+ const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
+ MVT LitTyMVT = getMVTForLLT(LitTy);
+ auto Decompose =
+ RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+ getMVTForLLT(BigTy), LitTyMVT, Idx, TRI);
+ unsigned RemIdx = Decompose.second;
+
+ // If the Idx has been completely eliminated then this is a subvector extract
+ // which naturally aligns to a vector register. These can easily be handled
+ // using subregister manipulation.
+ if (RemIdx == 0)
+ return true;
+
+ // Else LitTy is M1 or smaller and may need to be slid down: if LitTy
+ // was > M1 then the index would need to be a multiple of VLMAX, and so would
+ // divide exactly.
+ assert(
+ RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(LitTyMVT)).second ||
+ RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVII::VLMUL::LMUL_1);
+
+ // If the vector type is an LMUL-group type, extract a subvector equal to the
+ // nearest full vector register type.
+ LLT InterLitTy = BigTy;
+ Register Vec = Src;
+ if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
+ getLMUL1Ty(BigTy).getSizeInBits())) {
+ // If BigTy has an LMUL > 1, then LitTy should have a smaller LMUL, and
+ // we should have successfully decomposed the extract into a subregister.
+ assert(Decompose.first != RISCV::NoSubRegister);
+ InterLitTy = getLMUL1Ty(BigTy);
+ // SDAG builds a TargetExtractSubreg. We cannot create a Copy with a SubReg
+ // specified on the source Register (the equivalent) since a generic
+ // virtual register does not allow a subregister index.
+ Vec = MIB.buildExtractSubvector(InterLitTy, Src, Idx - RemIdx).getReg(0);
+ }
+
+ // Slide this vector register down by the desired number of elements in order
+ // to place the desired subvector starting at element 0.
+ const LLT XLenTy(STI.getXLenVT());
+ auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);
+ auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI);
+ uint64_t Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ auto Slidedown = MIB.buildInstr(
+ RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
+ {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});
+
+ // Now the vector is in the right position, extract our final subvector. This
+ // should resolve to a COPY.
+ MIB.buildExtractSubvector(Dst, Slidedown, 0);
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool RISCVLegalizerInfo::legalizeCustom(
LegalizerHelper &Helper, MachineInstr &MI,
LostDebugLocObserver &LocObserver) const {
@@ -1001,6 +1125,8 @@ bool RISCVLegalizerInfo::legalizeCustom(
return legalizeExt(MI, MIRBuilder);
case TargetOpcode::G_SPLAT_VECTOR:
return legalizeSplatVector(MI, MIRBuilder);
+ case TargetOpcode::G_EXTRACT_SUBVECTOR:
+ return legalizeExtractSubvector(MI, Helper, MIRBuilder);
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE:
return legalizeLoadStore(MI, Helper, MIRBuilder);
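
A note on the register geometry these helpers rely on (a sketch under the
assumption that RISCV::RVVBitsPerBlock is 64, i.e. one vector register is
known to hold at least 64 bits): getLMUL1Ty simply divides that block size
by the element width, and the decomposition step splits an extract index
into a whole-register part plus the RemIdx that still has to be slid down.
The numbers below match the extract_subvector_subregidx test added in this
patch; this is illustrative code, not part of the change:

  #include <cstdio>
  #include <initializer_list>

  int main() {
    const unsigned RVVBitsPerBlock = 64; // min bits per vector register

    // getLMUL1Ty: one register holds RVVBitsPerBlock / EltSize elements.
    for (unsigned EltSize : {8u, 16u, 32u, 64u})
      printf("elt s%-2u -> LMUL1 type <vscale x %u x s%u>\n", EltSize,
             RVVBitsPerBlock / EltSize, EltSize);

    // Index decomposition for: nxv1i32 = G_EXTRACT_SUBVECTOR nxv16i32, 3.
    unsigned Idx = 3;
    unsigned LMUL1MinElts = RVVBitsPerBlock / 32; // <vscale x 2 x s32>
    unsigned SubRegVec = Idx / LMUL1MinElts;      // M1 register number: 1
    unsigned RemIdx = Idx % LMUL1MinElts;         // left to slide down: 1
    printf("extract M1 register %u, then slide down by vscale * %u\n",
           SubRegVec, RemIdx);
    return 0;
  }
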
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 2fc28615e7630d..6919bca5a9104f 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -46,6 +46,8 @@ class RISCVLegalizerInfo : public LegalizerInfo {
bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool legalizeExtractSubvector(MachineInstr &MI, LegalizerHelper &Helper,
+ MachineIRBuilder &MIB) const;
bool legalizeLoadStore(MachineInstr &MI, LegalizerHelper &Helper,
MachineIRBuilder &MIB) const;
};
diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
index ba40662c49c1df..f6bf74c565ab38 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
@@ -57,3 +57,13 @@ def G_SPLAT_VECTOR_SPLIT_I64_VL : RISCVGenericInstruction {
let InOperandList = (ins type0:$passthru, type1:$hi, type1:$lo, type2:$vl);
let hasSideEffects = false;
}
+
+// Pseudo equivalent to RISCVISD::VSLIDEDOWN_VL.
+def G_VSLIDEDOWN_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$merge, type0:$vec, type1:$idx, type2:$mask,
+ type1:$vl, type1:$policy);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VSLIDEDOWN_VL, riscv_slidedown_vl>;
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
new file mode 100644
index 00000000000000..5f730bf436ea5a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
@@ -0,0 +1,483 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,RV32
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,RV64
+
+# Special handling for i1-element vectors with non-zero index
+---
+name: extract_subvector_nxv2i1_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: extract_subvector_nxv2i1_nxv4i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C2]](s32)
+ ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: extract_subvector_nxv2i1_nxv4i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
+ ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 4 x s1>), 2
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv2i1_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: extract_subvector_nxv2i1_nxv8i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C2]](s32)
+ ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
+ ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: extract_subvector_nxv2i1_nxv8i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
+ ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
+ ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s1>), 2
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv4i1_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: extract_subvector_nxv4i1_nxv64i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
+ ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[SELECT]](<vscale x 64 x s8>), 16
+ ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
+ ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR2]]
+ ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: extract_subvector_nxv4i1_nxv64i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
+ ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[SELECT]](<vscale x 64 x s8>), 16
+ ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+ ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
+ ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR2]]
+ ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 64 x s1>), 16
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv32i1_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: extract_subvector_nxv32i1_nxv64i1
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[DEF]](<vscale x 64 x s1>)
+ ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 4 x s1>), $x0, 3
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
+ ; RV32-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>)
+ ; RV32-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: extract_subvector_nxv32i1_nxv64i1
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[DEF]](<vscale x 64 x s1>)
+ ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), $x0, 3
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
+ ; RV64-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>)
+ ; RV64-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 64 x s1>), 32
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+
+# i1-element vectors with zero index
+---
+name: extract_subvector_nxv1i1_nxv4i1_zero
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv1i1_nxv4i1_zero
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 4 x s1>), 0
+ ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 4 x s1>), 0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv4i1_nxv8i1_zero
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv4i1_nxv8i1_zero
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s1>), 0
+ ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s1>), 0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv32i1_nxv64i1_zero
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv32i1_nxv64i1_zero
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[DEF]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[BITCAST]](<vscale x 8 x s8>), 0
+ ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 64 x s1>), 0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+
+# Extract with zero index
+---
+name: extract_subvector_nxv1i8_nxv2i8_zero
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv1i8_nxv2i8_zero
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 2 x s8>), 0
+ ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR %0(<vscale x 2 x s8>), 0
+ $v8 = COPY %1(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv2i16_nxv4i16_zero
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv2i16_nxv4i16_zero
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 4 x s16>), 0
+ ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s16>) = G_EXTRACT_SUBVECTOR %0(<vscale x 4 x s16>), 0
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv4i32_nxv8i32_zero
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv4i32_nxv8i32_zero
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s32>), 0
+ ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s32>), 0
+ $v8 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv4i8_nxv8i64_zero
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv4i8_nxv8i64_zero
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s64>), 0
+ ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s64>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s64>), 0
+ $v8 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8
+...
+
+# Extract with non-zero index
+---
+name: extract_subvector_nxv1i8_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: extract_subvector_nxv1i8_nxv2i8
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[DEF]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s8>), 0
+ ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: extract_subvector_nxv1i8_nxv2i8
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[DEF]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s8>), 0
+ ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR %0(<vscale x 2 x s8>), 1
+ $v8 = COPY %1(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv1i16_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: extract_subvector_nxv1i16_nxv4i16
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEDOWN_VL [[DEF1]], [[DEF]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s16>), 0
+ ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: extract_subvector_nxv1i16_nxv4i16
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEDOWN_VL [[DEF1]], [[DEF]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s16>), 0
+ ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s16>) = G_EXTRACT_SUBVECTOR %0(<vscale x 4 x s16>), 2
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv4i32_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv4i32_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s32>), 4
+ ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s32>), 4
+ $v8 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_nxv2i64_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_subvector_nxv2i64_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s64>), 2
+ ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s64>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s64>), 2
+ $v8 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_subregidx_zero
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: extract_subvector_subregidx_zero
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 16 x s32>), 0
+ ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
+ ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: extract_subvector_subregidx_zero
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 16 x s32>), 0
+ ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
+ ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR %0(<vscale x 16 x s32>), 1
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_subvector_subregidx
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32-LABEL: name: extract_subvector_subregidx
+ ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 16 x s32>), 2
+ ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+ ; RV32-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
+ ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: extract_subvector_subregidx
+ ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 16 x s32>), 2
+ ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+ ; RV64-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
+ ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR %0(<vscale x 16 x s32>), 3
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+
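
A closing note on the G_READ_VLENB / G_LSHR pairs in the checks above:
VLENB is the vector register width in bytes, i.e. vscale * 8, so a slide
amount of vscale * RemIdx is materialized as VLENB >> (3 - log2(RemIdx))
whenever RemIdx is a power of two. A small sketch (the concrete VLEN is an
assumption for illustration):

  #include <cstdio>
  #include <initializer_list>

  int main() {
    // Pick a concrete VLEN: 128 bits, so vscale = 2 for these tests' types.
    unsigned VLENB = 128 / 8; // what G_READ_VLENB reads back
    unsigned VScale = VLENB / 8;
    for (unsigned RemIdx : {1u, 2u, 4u}) {
      unsigned Shift = 3, R = RemIdx;
      while (R > 1) { R >>= 1; --Shift; } // log2 of a power-of-two RemIdx
      printf("RemIdx %u: VLENB >> %u = %2u == vscale * RemIdx = %2u\n",
             RemIdx, Shift, VLENB >> Shift, VScale * RemIdx);
    }
    return 0;
  }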