[llvm] [RISCV][GISEL] Legalize G_INSERT_VECTOR_ELT (PR #108250)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 11 10:48:20 PDT 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/108250
>From a8e49b9ef065cff26952be9284714cc9a863cb24 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 6 Mar 2024 09:50:24 -0800
Subject: [PATCH 1/4] [RISCV][GISEL] Add opcodes needed to legalize G_INSERT
and G_EXTRACT
---
llvm/lib/Target/RISCV/RISCVInstrGISel.td | 76 ++++++++++++++++++++++++
1 file changed, 76 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
index ba40662c49c1df..4150f2b2c93777 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
@@ -41,6 +41,82 @@ def G_VMCLR_VL : RISCVGenericInstruction {
}
def : GINodeEquiv<G_VMCLR_VL, riscv_vmclr_vl>;
+// Pseudo equivalent to a RISCVISD::VFMV_S_F_VL
+def G_VFMV_S_F_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VFMV_S_F_VL, riscv_vfmv_s_f_vl>;
+
+// Pseudo equivalent to a RISCVISD::VFMV_V_F_VL
+def G_VFMV_V_F_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VFMV_V_F_VL, riscv_vfmv_v_f_vl>;
+
+// Pseudo equivalent to a RISCVISD::VMV_S_X_VL
+def G_VMV_S_X_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMV_S_X_VL, riscv_vmv_s_x_vl>;
+
+// Pseudo equivalent to a RISCVISD::VMV_V_X_VL
+def G_VMV_V_X_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMV_V_X_VL, riscv_vmv_v_x_vl>;
+
+// Pseudo equivalent to a RISCVISD::VMV_V_V_VL
+def G_VMV_V_V_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$passthru, type0:$vec, type1:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMV_V_V_VL, riscv_vmv_v_v_vl>;
+
+// This instruction is a vector move instruction where the element type can be
+// either integer or floating point, but the register bank information is not
+// yet available to decide between G_VMV_V_X_VL and G_VFMV_V_F_VL.
+def G_VEC_MOVE_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+ let hasSideEffects = false;
+}
+
+// This instruction is a scalar move instruction where the element type can be
+// either integer or floating point, but the register bank information is not
+// yet available to decide between G_VMV_S_X_VL and G_VFMV_S_F_VL.
+def G_SCALAR_MOVE_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$vec, type1:$scalar, type2:$vl);
+ let hasSideEffects = false;
+}
+
+// Pseudo equivalent to a RISCVISD::VSLIDEUP_VL
+def G_VSLIDEUP_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$merge, type0:$vec, type1:$idx, type2:$mask,
+ type3:$vl, type4:$policy);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VSLIDEUP_VL, riscv_slideup_vl>;
+
+// Pseudo equivalent to a RISCVISD::VSLIDEDOWN_VL
+def G_VSLIDEDOWN_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$merge, type0:$vec, type1:$idx, type2:$mask,
+ type3:$vl, type4:$policy);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VSLIDEDOWN_VL, riscv_slidedown_vl>;
+
// Pseudo equivalent to a RISCVISD::VMSET_VL
def G_VMSET_VL : RISCVGenericInstruction {
let OutOperandList = (outs type0:$dst);
>From 5dea160a800be6890625ad1c746c5e2b22f483a5 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 5 Mar 2024 11:09:15 -0800
Subject: [PATCH 2/4] [RISCV][GISEL] Legalize G_INSERT and G_EXTRACT for
vectors
---
.../CodeGen/GlobalISel/MachineIRBuilder.cpp | 8 +-
llvm/lib/CodeGen/MachineVerifier.cpp | 16 +-
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 400 ++++++++++++++++++
.../Target/RISCV/GISel/RISCVLegalizerInfo.h | 3 +
.../legalizer/rvv/legalize-extract.mir | 195 +++++++++
5 files changed, 612 insertions(+), 10 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract.mir
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 925a1c7cf6aacc..96ca99a3871d8e 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -621,7 +621,8 @@ MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
#ifndef NDEBUG
assert(SrcTy.isValid() && "invalid operand type");
assert(DstTy.isValid() && "invalid operand type");
- assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
+ assert(TypeSize::isKnownLE(DstTy.getSizeInBits().getWithIncrement(Index),
+ SrcTy.getSizeInBits()) &&
"extracting off end of register");
#endif
@@ -797,8 +798,9 @@ MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
const SrcOp &Src,
const SrcOp &Op,
unsigned Index) {
- assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
- Res.getLLTTy(*getMRI()).getSizeInBits() &&
+ assert(TypeSize::isKnownLE(
+ Op.getLLTTy(*getMRI()).getSizeInBits().getWithIncrement(Index),
+ Res.getLLTTy(*getMRI()).getSizeInBits()) &&
"insertion past the end of a register");
if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 759201ed9dadc7..606929bb594e93 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1587,12 +1587,13 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
break;
}
- unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
- unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
+ TypeSize DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
+ TypeSize SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
if (SrcSize == DstSize)
report("extract source must be larger than result", MI);
- if (DstSize + OffsetOp.getImm() > SrcSize)
+ if (DstSize.getKnownMinValue() + OffsetOp.getImm() >
+ SrcSize.getKnownMinValue())
report("extract reads past end of register", MI);
break;
}
@@ -1609,13 +1610,14 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
break;
}
- unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
- unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
+ TypeSize DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
+ TypeSize SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
- if (DstSize <= SrcSize)
+ if (TypeSize::isKnownLE(DstSize, SrcSize))
report("inserted size must be smaller than total register", MI);
- if (SrcSize + OffsetOp.getImm() > DstSize)
+ if (SrcSize.getKnownMinValue() + OffsetOp.getImm() >
+ DstSize.getKnownMinValue())
report("insert writes past end of register", MI);
break;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 64e8ee76e83915..7413d094a11eb7 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -580,6 +580,27 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
SplatActions.clampScalar(1, sXLen, sXLen);
+ getActionDefinitionsBuilder(G_INSERT)
+ .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
+ typeIsLegalBoolVec(1, BoolVecTys, ST)))
+ .customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
+
+ getActionDefinitionsBuilder(G_EXTRACT)
+ .customIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
+ .customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
+
+ // TODO: i64-element vectors on RV32 may be legalized in certain cases.
+ getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
+ .clampScalar(2, sXLen, sXLen) // clamp the index operand to sXLen
+ .customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeInSet(1, {s8, s16, s32, s64}), typeInSet(2, {sXLen})));
+
+ getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
+ .clampScalar(1, sXLen, sXLen) // clamp the index operand to sXLen
+ .customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeInSet(1, {sXLen})));
+
getLegacyLegalizerInfo().computeTables();
}
@@ -914,6 +935,379 @@ bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
return true;
}
+/// Insert the value into the first position of a vector, then slide that
+/// vector up to the insert index. By limiting the active vector length to
+/// index+1 and merging with the original vector (with an undisturbed tail
+/// policy for elements >= VL), we achieve the desired result of leaving all
+/// elements untouched except the one at VL-1, which is replaced with the
+/// desired value.
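+///
+/// A rough sketch of the non-zero-index path below (names match the code):
+///   ValInVec = Elt moved into element 0 of an undef vector
+///   InsertVL = NewIdx + 1
+///   Slideup  = G_VSLIDEUP_VL InsertVec, ValInVec, NewIdx, Mask, InsertVL,
+///              tail/mask undisturbed
+/// with the result inserted back into the original vector if it was shrunk.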
+bool RISCVLegalizerInfo::legalizeInsertVectorElt(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+ assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
+
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+
+ Register Dst = MI.getOperand(0).getReg();
+ Register SrcVec = MI.getOperand(1).getReg();
+ Register Elt = MI.getOperand(2).getReg();
+ Register Idx = MI.getOperand(3).getReg();
+
+ LLT VecTy = MRI.getType(Dst);
+ const LLT EltTy = VecTy.getElementType();
+ const LLT XLenTy(STI.getXLenVT());
+
+ // FIXME: SelectionDAG promotes an s1 vector type to an s8 vector type at
+ // this point. GISel should take care of this during legalization. For now,
+ // the legalizer reports these types as not legal.
+
+ // If we know the index we're going to insert at, we can shrink Vec so that
+ // we're performing the scalar inserts and slideup on a smaller LMUL.
+
+ // If we can shrink the vector type, then a G_EXTRACT is used to get the
+ // smaller vector. The insertion is done on that smaller vector, and a
+ // G_INSERT then puts the result back into the larger, original vector.
+ unsigned AlignedIdx = 0;
+ auto InsertVec = SrcVec;
+ auto NewIdx = Idx;
+ if (auto ConstIdxOpt = getIConstantVRegVal(Idx, MRI)) {
+ const unsigned OrigIdx = ConstIdxOpt->getZExtValue();
+ // Do we know an upper bound on LMUL?
+ if (auto ShrunkTy = getSmallestLLTForIndex(VecTy, OrigIdx, STI))
+ VecTy = *ShrunkTy;
+
+ // If we're compiling for an exact VLEN value, we can always perform
+ // the insert in m1 as we can determine the register corresponding to
+ // the index in the register group.
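+ // For example, with an exact VLEN of 128, inserting into a nxv8i64 at
+ // index 7 gives ElemsPerVReg = 128 / 64 = 2, SubRegIdx = 3, and RemIdx = 1:
+ // the insert is then done on the m1 register at group element offset 3.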
+ const unsigned MinVLen = STI.getRealMinVLen();
+ const unsigned MaxVLen = STI.getRealMaxVLen();
+ const LLT M1Ty = getLMUL1Ty(VecTy);
+ if (MinVLen == MaxVLen &&
+ VecTy.getSizeInBits().getKnownMinValue() > MinVLen) {
+ unsigned ElemsPerVReg = MinVLen / EltTy.getSizeInBits().getFixedValue();
+ unsigned RemIdx = OrigIdx % ElemsPerVReg;
+ unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
+ unsigned ExtractIdx =
+ SubRegIdx * M1Ty.getElementCount().getKnownMinValue();
+ AlignedIdx = ExtractIdx;
+ NewIdx = MIB.buildConstant(MRI.getType(Idx), RemIdx).getReg(0);
+ VecTy = M1Ty;
+ }
+
+ // If the working type was shrunk at all, extract the subvector we insert
+ // into (AlignedIdx is 0 unless we aligned to an m1 register above).
+ if (VecTy != MRI.getType(SrcVec))
+ InsertVec = MIB.buildExtract(VecTy, SrcVec, AlignedIdx).getReg(0);
+ }
+
+ // TODO: i64-element vectors on RV32 can be lowered without scalar
+ // legalization if the most-significant 32 bits of the value are not affected
+ // by the sign-extension of the lower 32 bits. The Legalizer does not allow
+ // i64-elements through at the moment, so there is no sense in supporting
+ // the selection for this case.
+
+ // Insert into index zero
+
+ auto [Mask, VL] = buildDefaultVLOps(VecTy, MIB, MRI);
+ if (isNullOrNullSplat(*MRI.getVRegDef(NewIdx), MRI)) {
+ // TODO: Make sure we have fprb tests for regbankselect and legalization
+ // too.
+ auto Move =
+ MIB.buildInstr(RISCV::G_SCALAR_MOVE_VL, {VecTy}, {InsertVec, Elt, VL});
+ if (VecTy != MRI.getType(Dst))
+ MIB.buildInsert(Dst, SrcVec, Move, AlignedIdx);
+ else
+ MIB.buildCopy(Dst, Move);
+
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // Insert into non-constant or non-zero-constant index
+
+ auto ValInVec = buildScalarInsert(VecTy, Elt, VL, MIB, MRI, STI);
+ // Now that the value is in lane 0 of vector, slide it into position.
+ auto InsertVL = MIB.buildAdd(XLenTy, NewIdx, MIB.buildConstant(XLenTy, 1));
+ // TODO: SelectionDAG uses a tail agnostic policy if Idx is the last index of
+ // Vec. It can do this because it allows fixed vectors as a legal type; GISel
+ // does not. Can we find a way to use TA if Vec was a fixed vector before
+ // legalization?
+ uint64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ auto Slideup =
+ MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {VecTy},
+ {InsertVec, ValInVec, NewIdx, Mask, InsertVL, Policy});
+ // If we used a smaller vector to do the insertion, put the smaller vector
+ // result back into the original vector; otherwise the slideup result is
+ // already the full-width result.
+ if (VecTy != MRI.getType(Dst))
+ MIB.buildInsert(Dst, SrcVec, Slideup, AlignedIdx);
+ else
+ MIB.buildCopy(Dst, Slideup);
+
+ MI.eraseFromParent();
+ return true;
+}
+
+static MachineInstrBuilder buildVLMax(LLT VecTy, MachineIRBuilder &MIB) {
+ assert(VecTy.isScalableVector() && "Expected scalable vector");
+ // TODO: Figure out how to represent VLMAX as a MI
+ llvm_unreachable("Unimplemented");
+}
+
+bool RISCVLegalizerInfo::legalizeInsert(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+ assert(MI.getOpcode() == TargetOpcode::G_INSERT);
+
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src1 = MI.getOperand(1).getReg();
+ Register Src2 = MI.getOperand(2).getReg();
+ uint64_t Idx = MI.getOperand(3).getImm();
+
+ // Only support vectors using custom legalization
+ LLT BigTy = MRI.getType(Dst);
+ if (BigTy.isScalar())
+ return false;
+
+ LLT LitTy = MRI.getType(Src2);
+ Register BigVec = Src1;
+ Register LitVec = Src2;
+
+ // We don't have the ability to slide mask vectors up indexed by their i1
+ // elements; the smallest we can do is i8. Often we are able to bitcast to
+ // equivalent i8 vectors (e.g. nxv16i1 reinterpreted as nxv2i8). Otherwise, we
+ // must zero-extend to equivalent i8 vectors and truncate down after the
+ // insert.
+ if (LitTy.getElementType() == LLT::scalar(1) &&
+ (Idx != 0 ||
+ MRI.getVRegDef(BigVec)->getOpcode() != TargetOpcode::G_IMPLICIT_DEF)) {
+ auto BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
+ auto LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
+ if (BigTyMinElts >= 8 && LitTyMinElts >= 8) {
+ assert(Idx % 8 == 0 && "Invalid index");
+ assert(BigTyMinElts % 8 == 0 && LitTyMinElts % 8 == 0 &&
+ "Unexpected mask vector lowering");
+ Idx /= 8;
+ BigTy = LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8);
+ LitTy = LLT::vector(LitTy.getElementCount().divideCoefficientBy(8), 8);
+ BigVec = MIB.buildBitcast(BigTy, BigVec).getReg(0);
+ LitVec = MIB.buildBitcast(LitTy, LitVec).getReg(0);
+ } else {
+ // We can't slide this mask vector up indexed by its i1 elements.
+ // This poses a problem when we wish to insert a scalable vector which
+ // can't be re-expressed as a larger type. Just choose the slow path and
+ // extend to a larger type, then truncate back down.
+ LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
+ LLT ExtLitTy = LitTy.changeElementType(LLT::scalar(8));
+ auto BigZExt = MIB.buildZExt(ExtBigTy, BigVec);
+ auto LitZExt = MIB.buildZExt(ExtLitTy, LitVec);
+ auto Insert = MIB.buildInsert(ExtBigTy, BigZExt, LitZExt, Idx);
+ auto SplatZero = MIB.buildConstant(ExtBigTy, 0);
+ MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, Insert, SplatZero);
+ MI.eraseFromParent();
+ return true;
+ }
+ }
+
+ const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
+ MVT LitTyMVT = getMVTForLLT(LitTy);
+ unsigned SubRegIdx, RemIdx;
+ std::tie(SubRegIdx, RemIdx) =
+ RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+ getMVTForLLT(BigTy), LitTyMVT, Idx, TRI);
+
+ RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(LitTy));
+ bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
+ SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
+ SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
+
+ // If the Idx has been completely eliminated and this subvector's size is a
+ // vector register or a multiple thereof, or the surrounding elements are
+ // undef, then this is a subvector insert which naturally aligns to a vector
+ // register. These can easily be handled using subregister manipulation.
+ if (RemIdx == 0 && (!IsSubVecPartReg || MRI.getVRegDef(Src1)->getOpcode() ==
+ TargetOpcode::G_IMPLICIT_DEF))
+ return true;
+
+ // If the subvector is smaller than a vector register, then the insertion
+ // must preserve the undisturbed elements of the register. We do this by
+ // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
+ // (which resolves to a subregister copy), performing a VSLIDEUP to place the
+ // subvector within the vector register, and an INSERT_SUBVECTOR of that
+ // LMUL=1 type back into the larger vector (resolving to another subregister
+ // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
+ // to avoid allocating a large register group to hold our subvector.
+
+ // VSLIDEUP works by leaving elements 0 <= i < OFFSET undisturbed, elements
+ // OFFSET <= i < VL set to the "subvector", and VL <= i < VLMAX set to the
+ // tail policy (in our case undisturbed). This means we can set up a subvector
+ // insertion where OFFSET is the insertion offset and VL is OFFSET plus the
+ // size of the subvector.
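+ // Below, OFFSET is RemIdx and VL is RemIdx plus LitTy's element count (both
+ // scaled by vscale), as computed for the slideup path.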
+ const LLT XLenTy(STI.getXLenVT());
+ LLT InterLitTy = BigTy;
+ Register AlignedExtract = Src1;
+ unsigned AlignedIdx = Idx - RemIdx;
+ if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
+ getLMUL1Ty(BigTy).getSizeInBits())) {
+ InterLitTy = getLMUL1Ty(BigTy);
+ // Extract a subvector equal to the nearest full vector register type. This
+ // should resolve to a G_EXTRACT on a subreg.
+ AlignedExtract = MIB.buildExtract(InterLitTy, BigVec, AlignedIdx).getReg(0);
+ }
+
+ auto Insert =
+ MIB.buildInsert(InterLitTy, MIB.buildUndef(InterLitTy), LitVec, 0);
+
+ auto [Mask, VL] = buildDefaultVLOps(BigTy, MIB, MRI);
+
+ ElementCount EndIndex =
+ ElementCount::getScalable(RemIdx) + LitTy.getElementCount();
+ VL = buildVLMax(LitTy, MIB).getReg(0);
+
+ // Use tail agnostic policy if we're inserting over InterLitTy's tail.
+ uint64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ if (EndIndex == InterLitTy.getElementCount())
+ Policy = RISCVII::TAIL_AGNOSTIC;
+
+ // If we're inserting into the lowest elements, use a tail undisturbed
+ // vmv.v.v.
+ Register Inserted;
+ if (RemIdx == 0) {
+ Inserted = MIB.buildInstr(RISCV::G_VMV_V_V_VL, {InterLitTy},
+ {AlignedExtract, Insert, VL})
+ .getReg(0);
+ } else {
+ auto SlideupAmt = MIB.buildVScale(XLenTy, RemIdx);
+ // Construct the vector length corresponding to RemIdx + length(LitTy).
+ VL = MIB.buildAdd(XLenTy, SlideupAmt, VL).getReg(0);
+ Inserted =
+ MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InterLitTy},
+ {AlignedExtract, Insert, SlideupAmt, Mask, VL, Policy})
+ .getReg(0);
+ }
+
+ // If required, insert this subvector back into the correct vector register.
+ // This should resolve to an INSERT_SUBREG instruction.
+ if (TypeSize::isKnownGT(BigTy.getSizeInBits(), InterLitTy.getSizeInBits()))
+ Inserted = MIB.buildInsert(BigTy, BigVec, Inserted, AlignedIdx).getReg(0);
+
+ // We might have bitcast from a mask type: cast back to the original type if
+ // required.
+ MIB.buildBitcast(Dst, Inserted);
+
+ MI.eraseFromParent();
+ return true;
+}
+
+bool RISCVLegalizerInfo::legalizeExtract(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+ assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);
+
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+ uint64_t Idx = MI.getOperand(2).getImm();
+
+ // Only support vectors using custom legalization
+ LLT LitTy = MRI.getType(Dst);
+ if (LitTy.isScalar())
+ return false;
+
+ LLT BigTy = MRI.getType(Src);
+ Register Vec = Src;
+
+ // We don't have the ability to slide mask vectors down indexed by their i1
+ // elements; the smallest we can do is i8. Often we are able to bitcast to
+ // equivalent i8 vectors.
+ if (LitTy.getElementType() == LLT::scalar(1) && Idx != 0) {
+ auto BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
+ auto LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
+ if (BigTyMinElts >= 8 && LitTyMinElts >= 8) {
+ assert(Idx % 8 == 0 && "Invalid index");
+ assert(BigTyMinElts % 8 == 0 && LitTyMinElts % 8 == 0 &&
+ "Unexpected mask vector lowering");
+ Idx /= 8;
+ BigTy = LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8);
+ LitTy = LLT::vector(LitTy.getElementCount().divideCoefficientBy(8), 8);
+ Vec = MIB.buildBitcast(BigTy, Vec).getReg(0);
+ } else {
+ // We can't slide this mask vector down indexed by its i1 elements.
+ // This poses a problem when we wish to extract a scalable vector which
+ // can't be re-expressed as a larger type. Just choose the slow path and
+ // extend to a larger type, then truncate back down.
+ LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
+ LLT ExtLitTy = LitTy.changeElementType(LLT::scalar(8));
+ auto BigZExt = MIB.buildZExt(ExtBigTy, Vec);
+ auto ExtractZExt = MIB.buildExtract(ExtLitTy, BigZExt, Idx);
+ auto SplatZero = MIB.buildSplatVector(
+ ExtLitTy, MIB.buildConstant(ExtLitTy.getElementType(), 0));
+ MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, ExtractZExt, SplatZero);
+ MI.eraseFromParent();
+ return true;
+ }
+ }
+
+ // With an index of 0 this is a cast-like subvector, which can be performed
+ // with subregister operations.
+ if (Idx == 0)
+ return true;
+
+ // extract_subvector scales the index by vscale if the subvector is scalable,
+ // and decomposeSubvectorInsertExtractToSubRegs takes this into account.
+ const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
+ MVT LitTyMVT = getMVTForLLT(LitTy);
+ unsigned SubRegIdx;
+ ElementCount RemIdx;
+ auto Decompose =
+ RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
+ getMVTForLLT(BigTy), LitTyMVT, Idx, TRI);
+ SubRegIdx = Decompose.first;
+ RemIdx = ElementCount::getScalable(Decompose.second);
+
+ // If the Idx has been completely eliminated then this is a subvector extract
+ // which naturally aligns to a vector register. These can easily be handled
+ // using subregister manipulation.
+ // TODO: add tests
+ if (RemIdx.isZero()) {
+ assert(false);
+ return true;
+ }
+
+ // Else LitTy is M1 or smaller and may need to be slid down: if LitTy
+ // was > M1 then the index would need to be a multiple of VLMAX, and so would
+ // divide exactly.
+ assert(
+ RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(LitTyMVT)).second ||
+ RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVII::VLMUL::LMUL_1);
+
+ // If the vector type is an LMUL-group type, extract a subvector equal to the
+ // nearest full vector register type.
+ LLT InterLitTy = BigTy;
+ if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
+ getLMUL1Ty(BigTy).getSizeInBits())) {
+ // If BigTy has an LMUL > 1, then LitTy should have a smaller LMUL, and
+ // we should have successfully decomposed the extract into a subregister.
+ assert(SubRegIdx != RISCV::NoSubRegister);
+ InterLitTy = getLMUL1Ty(BigTy);
+ // TODO: need to make this a G_EXTRACT_SUBREG?
+ Vec = MIB.buildExtract(InterLitTy, Vec, SubRegIdx).getReg(0);
+ }
+
+ // Slide this vector register down by the desired number of elements in order
+ // to place the desired subvector starting at element 0.
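+ // Roughly: Dst = G_EXTRACT(G_VSLIDEDOWN_VL(undef, Vec, vscale * RemIdx), 0).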
+ const LLT XLenTy(STI.getXLenVT());
+ auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx.getKnownMinValue());
+ auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI);
+ uint64_t Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ auto Slidedown = MIB.buildInstr(
+ RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
+ {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});
+
+ // Now the vector is in the right position, extract our final subvector. This
+ // should resolve to a COPY.
+ auto Extract = MIB.buildExtract(LitTy, Slidedown, 0);
+
+ // We might have bitcast from a mask type: cast back to the original type if
+ // required.
+ MIB.buildBitcast(Dst, Extract);
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool RISCVLegalizerInfo::legalizeCustom(
LegalizerHelper &Helper, MachineInstr &MI,
LostDebugLocObserver &LocObserver) const {
@@ -987,6 +1381,12 @@ bool RISCVLegalizerInfo::legalizeCustom(
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE:
return legalizeLoadStore(MI, Helper, MIRBuilder);
+ case TargetOpcode::G_INSERT_VECTOR_ELT:
+ return legalizeInsertVectorElt(MI, MIRBuilder);
+ case TargetOpcode::G_INSERT:
+ return legalizeInsert(MI, MIRBuilder);
+ case TargetOpcode::G_EXTRACT:
+ return legalizeExtract(MI, MIRBuilder);
}
llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 2fc28615e7630d..b6caecf4dc84e9 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -48,6 +48,9 @@ class RISCVLegalizerInfo : public LegalizerInfo {
bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool legalizeLoadStore(MachineInstr &MI, LegalizerHelper &Helper,
MachineIRBuilder &MIB) const;
+ bool legalizeInsertVectorElt(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool legalizeInsert(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool legalizeExtract(MachineInstr &MI, MachineIRBuilder &MIB) const;
};
} // end namespace llvm
#endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract.mir
new file mode 100644
index 00000000000000..f51885f4227de7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract.mir
@@ -0,0 +1,195 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+# Special handling for i1-element vectors with non-zero index
+---
+name: extract_nxv2i1_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_nxv2i1_nxv4i1
+ ; CHECK: PseudoRET
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_EXTRACT %0(<vscale x 4 x s1>), 2
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_nxv4i1_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_nxv4i1_nxv8i1
+ ; CHECK: PseudoRET
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_EXTRACT %0(<vscale x 8 x s1>), 2
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: extract_nxv32i1_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: extract_nxv32i1_nxv64i1
+ ; CHECK: PseudoRET
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_EXTRACT %0(<vscale x 64 x s1>), 16
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+
+# # i1-element vectors with zero index
+# ---
+# name: extract_nxv2i1_nxv4i1_zero
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv2i1_nxv4i1_zero
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 2 x s1>) = G_EXTRACT %0(<vscale x 4 x s1>), 0
+# $v8 = COPY %1(<vscale x 2 x s1>)
+# PseudoRET implicit $v8
+# ...
+# ---
+# name: extract_nxv4i1_nxv8i1_zero
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv4i1_nxv8i1_zero
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 4 x s1>) = G_EXTRACT %0(<vscale x 8 x s1>), 0
+# $v8 = COPY %1(<vscale x 4 x s1>)
+# PseudoRET implicit $v8
+# ...
+# ---
+# name: extract_nxv32i1_nxv64i1_zero
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv32i1_nxv64i1_zero
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 32 x s1>) = G_EXTRACT %0(<vscale x 64 x s1>), 0
+# $v8 = COPY %1(<vscale x 32 x s1>)
+# PseudoRET implicit $v8
+# ...
+#
+# # Extract with zero index
+# ---
+# name: extract_nxv1i8_nxv2i8_zero
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv1i8_nxv2i8_zero
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 1 x s8>) = G_EXTRACT %0(<vscale x 2 x s8>), 0
+# $v8 = COPY %1(<vscale x 1 x s8>)
+# PseudoRET implicit $v8
+# ...
+# ---
+# name: extract_nxv2i16_nxv4i16_zero
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv2i16_nxv4i16_zero
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 2 x s16>) = G_EXTRACT %0(<vscale x 4 x s16>), 0
+# $v8 = COPY %1(<vscale x 2 x s16>)
+# PseudoRET implicit $v8
+# ...
+# ---
+# name: extract_nxv4i32_nxv8i32_zero
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv4i32_nxv8i32_zero
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 4 x s32>) = G_EXTRACT %0(<vscale x 8 x s32>), 0
+# $v8 = COPY %1(<vscale x 4 x s32>)
+# PseudoRET implicit $v8
+# ...
+# ---
+# name: extract_nxv2i64_nxv8i64_zero
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv2i64_nxv8i64_zero
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 2 x s64>) = G_EXTRACT %0(<vscale x 8 x s64>), 0
+# $v8 = COPY %1(<vscale x 2 x s64>)
+# PseudoRET implicit $v8
+# ...
+#
+# # Extract with non-zero index
+# ---
+# name: extract_nxv1i8_nxv2i8
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv1i8_nxv2i8
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 1 x s8>) = G_EXTRACT %0(<vscale x 2 x s8>), 0
+# $v8 = COPY %1(<vscale x 1 x s8>)
+# PseudoRET implicit $v8
+# ...
+# ---
+# name: extract_nxv2i16_nxv4i16
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv2i16_nxv4i16
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 2 x s16>) = G_EXTRACT %0(<vscale x 4 x s16>), 0
+# $v8 = COPY %1(<vscale x 2 x s16>)
+# PseudoRET implicit $v8
+# ...
+# ---
+# name: extract_nxv4i32_nxv8i32
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv4i32_nxv8i32
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 4 x s32>) = G_EXTRACT %0(<vscale x 8 x s32>), 0
+# $v8 = COPY %1(<vscale x 4 x s32>)
+# PseudoRET implicit $v8
+# ...
+# ---
+# name: extract_nxv2i64_nxv8i64
+# legalized: false
+# tracksRegLiveness: true
+# body: |
+# bb.0.entry:
+# ; CHECK-LABEL: name: extract_nxv2i64_nxv8i64
+# ; CHECK: PseudoRET
+# %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+# %1:_(<vscale x 2 x s64>) = G_EXTRACT %0(<vscale x 8 x s64>), 0
+# $v8 = COPY %1(<vscale x 2 x s64>)
+# PseudoRET implicit $v8
+# ...
+#
+#
+#
>From b374aabd3d5c71f6cc19184775d6e89e1295ba9e Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 5 Mar 2024 10:54:36 -0800
Subject: [PATCH 3/4] [RISCV][GISEL] Legalize G_INSERT_VECTOR_ELT
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 110 +++
.../rvv/legalize-insertelement-knownvlen.mir | 65 ++
.../rvv/legalize-insertelement-rv64.mir | 856 ++++++++++++++++++
3 files changed, 1031 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-knownvlen.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv64.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 7413d094a11eb7..1d824008089bfb 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -823,6 +823,39 @@ bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
return true;
}
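+
+/// Return the LMUL=1 (single vector register) type with the same element type
+/// as VecTy, e.g. nxv2i32 for any scalable i32 vector, since RVVBitsPerBlock
+/// is 64.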
+static LLT getLMUL1Ty(LLT VecTy) {
+ assert(VecTy.getElementType().getSizeInBits() <= 64 &&
+ "Unexpected vector LLT");
+ return LLT::scalable_vector(RISCV::RVVBitsPerBlock /
+ VecTy.getElementType().getSizeInBits(),
+ VecTy.getElementType());
+}
+
+/// Given a scalable vector type and an index into it, returns the type for the
+/// smallest subvector that the index fits in. This can be used to reduce LMUL
+/// for operations like vslidedown.
+///
+/// E.g. with Zvl128b, index 3 of a nxv4i32 fits within the first nxv2i32.
+static std::optional<LLT>
+getSmallestLLTForIndex(LLT VecTy, unsigned MaxIdx,
+ const RISCVSubtarget &Subtarget) {
+ assert(VecTy.isScalableVector());
+ const unsigned EltSize = VecTy.getScalarSizeInBits();
+ const unsigned VectorBitsMin = Subtarget.getRealMinVLen();
+ const unsigned MinVLMAX = VectorBitsMin / EltSize;
+ LLT SmallerTy;
+ if (MaxIdx < MinVLMAX)
+ SmallerTy = getLMUL1Ty(VecTy);
+ else if (MaxIdx < MinVLMAX * 2)
+ SmallerTy = getLMUL1Ty(VecTy).multiplyElements(2);
+ else if (MaxIdx < MinVLMAX * 4)
+ SmallerTy = getLMUL1Ty(VecTy).multiplyElements(4);
+ if (!SmallerTy.isValid() ||
+ !TypeSize::isKnownGT(VecTy.getSizeInBits(), SmallerTy.getSizeInBits()))
+ return std::nullopt;
+ return SmallerTy;
+}
+
/// Return the type of the mask type suitable for masking the provided
/// vector type. This is simply an i1 element type vector of the same
/// (possibly scalable) length.
@@ -879,6 +912,83 @@ buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
Unmerge.getReg(1), VL, MIB, MRI);
}
+static MachineInstrBuilder
+buildScalarSplat(const DstOp &Dst, const SrcOp &Passthru, Register Scalar,
+ Register VL, MachineIRBuilder &MIB, MachineRegisterInfo &MRI,
+ const RISCVSubtarget &Subtarget) {
+ const LLT XLenTy(Subtarget.getXLenVT());
+
+ // TODO: The simplest case is when the operand only needs to be promoted to
+ // XLenTy. Currently the only call to buildScalarSplat occurs when
+ // isKnownGT(ScalarTySize, XLenTySize), so we don't need to handle this case
+ // yet.
+
+ LLT ScalarTy = MRI.getType(Scalar);
+
+ assert(XLenTy == LLT::scalar(32) && ScalarTy == LLT::scalar(64) &&
+ "Unexpected scalar for splat lowering!");
+
+ if (auto C = getIConstantVRegSExtVal(VL, MRI);
+ C && *C == 1 && isNullOrNullSplat(*MRI.getVRegDef(Scalar), MRI))
+ return MIB.buildInstr(RISCV::G_SCALAR_MOVE_VL, {Dst},
+ {Passthru, MIB.buildConstant(XLenTy, 0), VL});
+
+ // Otherwise use the more complicated splatting algorithm.
+ return buildSplatSplitS64WithVL(Dst, Passthru, Scalar, VL, MIB, MRI);
+}
+
+// This function lowers an insert of a scalar operand Scalar into lane
+// 0 of the vector regardless of the value of VL. The contents of the
+// remaining lanes of the result vector are unspecified. VL is assumed
+// to be non-zero.
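+// A minimal sketch of the common case (Scalar no wider than XLenTy):
+//   Dst = G_SCALAR_MOVE_VL undef, ext(Scalar to XLenTy), VL
+// Wider scalars fall back to the splat lowering above, and an insert fed by a
+// G_EXTRACT_VECTOR_ELT of element 0 is folded to reuse the extracted vector.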
+static MachineInstrBuilder buildScalarInsert(const DstOp &Dst, Register Scalar,
+ const SrcOp &VL,
+ MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI,
+ const RISCVSubtarget &Subtarget) {
+ LLT VecTy = Dst.getLLTTy(MRI);
+ assert(VecTy.isScalableVector() && "Expect Dst is scalable vector type.");
+
+ const LLT XLenTy(Subtarget.getXLenVT());
+ auto Undef = MIB.buildUndef(VecTy);
+
+ // Dst = G_INSERT_VECTOR_ELT Undef (G_EXTRACT_VECTOR_ELT V 0) N -> Dst = V
+ MachineInstr *ScalarMI = MRI.getVRegDef(Scalar);
+ if (ScalarMI->getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
+ isNullOrNullSplat(*MRI.getVRegDef(ScalarMI->getOperand(2).getReg()),
+ MRI)) {
+ Register V = ScalarMI->getOperand(1).getReg();
+ LLT VTy = MRI.getType(V);
+ // If V is no larger than Dst, insert it into Undef to widen it to Dst's type.
+ if (TypeSize::isKnownLE(VTy.getSizeInBits(), VecTy.getSizeInBits()))
+ return MIB.buildInsert(Dst, Undef, V, 0);
+ // V is as big as or bigger than VecTy. Use an extract to get the correct Dst
+ // type.
+ return MIB.buildExtract(Dst, V, 0);
+ }
+
+ // Avoid the tricky legalization cases by falling back to using the
+ // splat code which already handles it gracefully.
+ LLT ScalarTy = MRI.getType(Scalar);
+ if (TypeSize::isKnownGT(ScalarTy.getSizeInBits(), XLenTy.getSizeInBits()))
+ return buildScalarSplat(Dst, Undef, Scalar,
+ MIB.buildConstant(XLenTy, 1).getReg(0), MIB, MRI,
+ Subtarget);
+
+ Register ExtScalar = Scalar;
+ if (TypeSize::isKnownLT(ScalarTy.getSizeInBits(), XLenTy.getSizeInBits())) {
+ // If the operand is a constant, sign extend to increase our chances
+ // of being able to use a .vi instruction. ANY_EXTEND would become a
+ // zero extend and the simm5 check in isel would fail.
+ // FIXME: Should we ignore the upper bits in isel instead?
+ unsigned ExtOpc = isConstantOrConstantVector(*MRI.getVRegDef(Scalar), MRI)
+ ? TargetOpcode::G_SEXT
+ : TargetOpcode::G_ANYEXT;
+ ExtScalar = MIB.buildInstr(ExtOpc, {XLenTy}, {Scalar}).getReg(0);
+ }
+ return MIB.buildInstr(RISCV::G_SCALAR_MOVE_VL, {Dst}, {Undef, ExtScalar, VL});
+}
+
// Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
// legal equivalently-sized i8 type, so we can use that as a go-between.
// Splats of s1 types that have constant value can be legalized as VMSET_VL or
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-knownvlen.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-knownvlen.mir
new file mode 100644
index 00000000000000..434e8c0bc60e77
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-knownvlen.mir
@@ -0,0 +1,65 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define <vscale x 4 x i32> @insertelement_nxv4i32_known_vlen() #0 {
+ %a = insertelement <vscale x 4 x i32> poison, i32 0, i64 3
+ ret <vscale x 4 x i32> %a
+ }
+ define <vscale x 8 x i64> @insertelement_nxv8i64_known_vlen() #0 {
+ %a = insertelement <vscale x 8 x i64> poison, i64 0, i64 7
+ ret <vscale x 8 x i64> %a
+ }
+ attributes #0 = { vscale_range(2,2) }
+...
+---
+name: insertelement_nxv4i32_known_vlen
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ ; CHECK-LABEL: name: insertelement_nxv4i32_known_vlen
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C]](s32)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SCALAR_MOVE_VL [[DEF1]], [[SEXT]](s64), $x0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[C1]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[C2]](s64), 0
+ ; CHECK-NEXT: $v8m2 = COPY %3:_(<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_CONSTANT i64 3
+ %3:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m2 = COPY %3(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv8i64_known_vlen
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ ; CHECK-LABEL: name: insertelement_nxv8i64_known_vlen
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_EXTRACT [[DEF]](<vscale x 8 x s64>), 3
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SCALAR_MOVE_VL [[DEF1]], [[C]](s64), $x0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_VSLIDEUP_VL [[EXTRACT]], [[SCALAR_MOVE_VL]], [[C1]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C2]](s64), 0
+ ; CHECK-NEXT: [[INSERT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT [[EXTRACT]], [[VSLIDEUP_VL]](<vscale x 1 x s64>), 3
+ ; CHECK-NEXT: $v8m8 = COPY [[INSERT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(s64) = G_CONSTANT i64 7
+ %3:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m8 = COPY %3(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv64.mir
new file mode 100644
index 00000000000000..25c1a5be3ddc46
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-insertelement-rv64.mir
@@ -0,0 +1,856 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: insertelement_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: insertelement_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SCALAR_MOVE_VL [[DEF1]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 1 x s8>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(s8) = G_CONSTANT i8 0
+ %3:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: insertelement_nxv2i8
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SCALAR_MOVE_VL [[DEF1]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 1
+ %2:_(s8) = G_CONSTANT i8 1
+ %3:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: insertelement_nxv4i8
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SCALAR_MOVE_VL [[DEF]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(s8) = G_CONSTANT i8 0
+ %3:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: insertelement_nxv8i8
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SCALAR_MOVE_VL [[DEF]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 8 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = COPY $v8
+ %1:_(s8) = G_CONSTANT i8 1
+ %2:_(s8) = G_CONSTANT i8 1
+ %3:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2, $x10, $x11
+ ; CHECK-LABEL: name: insertelement_nxv16i8
+ ; CHECK: liveins: $v8m2, $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SCALAR_MOVE_VL [[DEF]], [[COPY1]](s64), $x0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 16 x s8>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 16 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m2 = COPY %5:_(<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = COPY $v8m2
+ %4:_(s64) = COPY $x10
+ %3:_(s64) = COPY $x11
+ %1:_(s8) = G_TRUNC %4(s64)
+ %2:_(s8) = G_TRUNC %3(s64)
+ %5:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m2 = COPY %5(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $x10, $x11
+ ; CHECK-LABEL: name: insertelement_nxv32i8
+ ; CHECK: liveins: $v8m4, $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SCALAR_MOVE_VL [[DEF]], [[COPY1]](s64), $x0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 32 x s8>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 32 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m4 = COPY %5:_(<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = COPY $v8m4
+ %4:_(s64) = COPY $x10
+ %3:_(s64) = COPY $x11
+ %1:_(s8) = G_TRUNC %4(s64)
+ %2:_(s8) = G_TRUNC %3(s64)
+ %5:_(<vscale x 32 x s8>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m4 = COPY %5(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: insertelement_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SCALAR_MOVE_VL [[DEF1]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 64 x s8>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 64 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m8 = COPY %3:_(<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(s8) = G_CONSTANT i8 0
+ %3:_(<vscale x 64 x s8>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m8 = COPY %3(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: insertelement_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SCALAR_MOVE_VL [[DEF1]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 1 x s16>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(s16) = G_CONSTANT i16 0
+ %3:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: insertelement_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SCALAR_MOVE_VL [[DEF1]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 2 x s16>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 1
+ %2:_(s16) = G_CONSTANT i16 1
+ %3:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: insertelement_nxv4i16
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SCALAR_MOVE_VL [[DEF]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = COPY $v8
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(s16) = G_CONSTANT i16 0
+ %3:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2
+ ; CHECK-LABEL: name: insertelement_nxv8i16
+ ; CHECK: liveins: $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT1]], [[C3]](s64)
+ ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SCALAR_MOVE_VL [[DEF]], [[ASHR1]](s64), $x0
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C4]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 8 x s16>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 8 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m2 = COPY %3:_(<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = COPY $v8m2
+ %1:_(s16) = G_CONSTANT i16 1
+ %2:_(s16) = G_CONSTANT i16 1
+ %3:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m2 = COPY %3(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4, $x10, $x11
+ ; CHECK-LABEL: name: insertelement_nxv16i16
+ ; CHECK: liveins: $v8m4, $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SCALAR_MOVE_VL [[DEF]], [[COPY1]](s64), $x0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 16 x s16>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 16 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m4 = COPY %5:_(<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = COPY $v8m4
+ %4:_(s64) = COPY $x10
+ %3:_(s64) = COPY $x11
+ %1:_(s16) = G_TRUNC %4(s64)
+ %2:_(s16) = G_TRUNC %3(s64)
+ %5:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m4 = COPY %5(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $x10, $x11
+ ; CHECK-LABEL: name: insertelement_nxv32i16
+ ; CHECK: liveins: $v8m8, $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SCALAR_MOVE_VL [[DEF]], [[COPY1]](s64), $x0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 32 x s16>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 32 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m8 = COPY %5:_(<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = COPY $v8m8
+ %4:_(s64) = COPY $x10
+ %3:_(s64) = COPY $x11
+ %1:_(s16) = G_TRUNC %4(s64)
+ %2:_(s16) = G_TRUNC %3(s64)
+ %5:_(<vscale x 32 x s16>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m8 = COPY %5(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: insertelement_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C1]](s32)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[C]](s32)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SCALAR_MOVE_VL [[DEF1]], [[SEXT1]](s64), $x0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT]], [[C2]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 1 x s32>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[SEXT]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s32) = G_CONSTANT i32 0
+ %3:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: insertelement_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C1]](s32)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[C]](s32)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SCALAR_MOVE_VL [[DEF1]], [[SEXT1]](s64), $x0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT]], [[C2]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[SEXT]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 1
+ %2:_(s32) = G_CONSTANT i32 1
+ %3:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m2
+ ; CHECK-LABEL: name: insertelement_nxv4i32
+ ; CHECK: liveins: $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C1]](s32)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[C]](s32)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SCALAR_MOVE_VL [[DEF]], [[SEXT1]](s64), $x0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT]], [[C2]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 4 x s32>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[SEXT]](s64), [[VMSET_VL]](<vscale x 4 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m2 = COPY %3:_(<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = COPY $v8m2
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s32) = G_CONSTANT i32 0
+ %3:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m2 = COPY %3(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4
+ ; CHECK-LABEL: name: insertelement_nxv8i32
+ ; CHECK: liveins: $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C1]](s32)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[C]](s32)
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SCALAR_MOVE_VL [[DEF]], [[SEXT1]](s64), $x0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT]], [[C2]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 8 x s32>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[SEXT]](s64), [[VMSET_VL]](<vscale x 8 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m4 = COPY %3:_(<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = COPY $v8m4
+ %1:_(s32) = G_CONSTANT i32 1
+ %2:_(s32) = G_CONSTANT i32 1
+ %3:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m4 = COPY %3(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $x10, $x11
+ ; CHECK-LABEL: name: insertelement_nxv16i32
+ ; CHECK: liveins: $v8m8, $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY2]], 32
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SCALAR_MOVE_VL [[DEF]], [[COPY1]](s64), $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT_INREG]], [[C]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 16 x s32>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[SEXT_INREG]](s64), [[VMSET_VL]](<vscale x 16 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m8 = COPY %5:_(<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = COPY $v8m8
+ %4:_(s64) = COPY $x10
+ %3:_(s64) = COPY $x11
+ %1:_(s32) = G_TRUNC %4(s64)
+ %2:_(s32) = G_TRUNC %3(s64)
+ %5:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m8 = COPY %5(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: insertelement_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SCALAR_MOVE_VL [[DEF]], [[C]](s64), $x0
+ ; CHECK-NEXT: $v8 = COPY %3:_(<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(s64) = G_CONSTANT i64 0
+ %3:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8 = COPY %3(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: insertelement_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SCALAR_MOVE_VL [[DEF1]], [[C]](s64), $x0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_VSLIDEUP_VL [[DEF]], [[SCALAR_MOVE_VL]], [[C1]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C2]](s64), 0
+ ; CHECK-NEXT: $v8m2 = COPY %3:_(<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 1
+ %2:_(s64) = G_CONSTANT i64 1
+ %3:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m2 = COPY %3(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: insertelement_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m4
+ ; CHECK-LABEL: name: insertelement_nxv4i64
+ ; CHECK: liveins: $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SCALAR_MOVE_VL [[COPY]], [[C]](s64), $x0
+ ; CHECK-NEXT: $v8m4 = COPY %3:_(<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = COPY $v8m4
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(s64) = G_CONSTANT i64 0
+ %3:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m4 = COPY %3(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: insertelement_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8
+ ; CHECK-LABEL: name: insertelement_nxv8i64
+ ; CHECK: liveins: $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SCALAR_MOVE_VL [[DEF]], [[C]](s64), $x0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[C1]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C2]](s64), 0
+ ; CHECK-NEXT: $v8m8 = COPY %3:_(<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(s64) = G_CONSTANT i64 1
+ %2:_(s64) = G_CONSTANT i64 1
+ %3:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m8 = COPY %3(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: insertelement_nxv8i64_2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $x10, $x11
+ ; CHECK-LABEL: name: insertelement_nxv8i64_2
+ ; CHECK: liveins: $v8m8, $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x11
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SCALAR_MOVE_VL:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SCALAR_MOVE_VL [[DEF]], [[COPY1]](s64), $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[C]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 8 x s64>) = G_VSLIDEUP_VL [[COPY]], [[SCALAR_MOVE_VL]], [[COPY2]](s64), [[VMSET_VL]](<vscale x 8 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8m8 = COPY %3:_(<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = COPY $v8m8
+ %1:_(s64) = COPY $x10
+ %2:_(s64) = COPY $x11
+ %5:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT %0, %1, %2
+ $v8m8 = COPY %5(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+
+# These tests check the legalization of a G_INSERT_VECTOR_ELT whose scalar element
+# operand is a G_EXTRACT_VECTOR_ELT with an index operand of zero.
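+# In the expected output below no G_SCALAR_MOVE_VL is emitted: the element is
+# materialized at lane 0 of the slide-up source (via a G_INSERT at offset 0, or
+# a G_EXTRACT of the low subvector when the source vector is wider than the
+# destination), and G_VSLIDEUP_VL then slides it to the requested index using a
+# VL of index + 1.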
+---
+name: insertelement_nxv4i8_extractelement_from_same_vector
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: insertelement_nxv4i8_extractelement_from_same_vector
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s8>), [[TRUNC]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT [[DEF]], [[EVEC]](s8), 0
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C3]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEUP_VL [[COPY]], [[INSERT]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %4:_(<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(s8) = G_CONSTANT i8 1
+ %3:_(s8) = G_EXTRACT_VECTOR_ELT %0, %1
+ %4:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %0, %3, %2
+ $v8 = COPY %4(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv4i8_extractelement_from_same_vector_type
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: insertelement_nxv4i8_extractelement_from_same_vector_type
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s8>), [[TRUNC]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT [[DEF1]], [[EVEC]](s8), 0
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C3]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEUP_VL [[DEF]], [[INSERT]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %5:_(<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = COPY $v8
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %2:_(s8) = G_CONSTANT i8 0
+ %3:_(s8) = G_CONSTANT i8 1
+ %4:_(s8) = G_EXTRACT_VECTOR_ELT %0, %2
+ %5:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT %1, %4, %3
+ $v8 = COPY %5(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i16_extractelement_from_smaller_vector_type
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: insertelement_nxv2i16_extractelement_from_smaller_vector_type
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s16>), [[TRUNC]](s16)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C2]](s64)
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C2]](s64)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT [[DEF1]], [[EVEC]](s8), 0
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C3]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 2 x s16>) = G_VSLIDEUP_VL [[DEF]], [[INSERT]], [[ASHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %5:_(<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = COPY $v8
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %2:_(s16) = G_CONSTANT i16 0
+ %3:_(s16) = G_CONSTANT i16 1
+ %4:_(s8) = G_EXTRACT_VECTOR_ELT %0, %2
+ %5:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT %1, %4, %3
+ $v8 = COPY %5(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: insertelement_nxv2i32_extractelement_from_larger_vector_type
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $v8
+ ; CHECK-LABEL: name: insertelement_nxv2i32_extractelement_from_larger_vector_type
+ ; CHECK: liveins: $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C]](s32)
+ ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT [[COPY]](<vscale x 4 x s32>), 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT]], [[C1]]
+ ; CHECK-NEXT: [[VSLIDEUP_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEUP_VL [[DEF]], [[EXTRACT]], [[SEXT]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[ADD]](s64), 0
+ ; CHECK-NEXT: $v8 = COPY %5:_(<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s32>) = COPY $v8
+ %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %2:_(s32) = G_CONSTANT i32 0
+ %3:_(s32) = G_CONSTANT i32 1
+ %4:_(s8) = G_EXTRACT_VECTOR_ELT %0, %2
+ %5:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT %1, %4, %3
+ $v8 = COPY %5(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
>From 65c580fca4a49292a2a2c43c4d7ffbb3385b89cf Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 11 Sep 2024 10:48:01 -0700
Subject: [PATCH 4/4] fixup! add string to assert
---
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 1d824008089bfb..2291137e9d335c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -839,7 +839,7 @@ static LLT getLMUL1Ty(LLT VecTy) {
static std::optional<LLT>
getSmallestLLTForIndex(LLT VecTy, unsigned MaxIdx,
const RISCVSubtarget &Subtarget) {
- assert(VecTy.isScalableVector());
+ assert(VecTy.isScalableVector() && "Expected scalable vector");
const unsigned EltSize = VecTy.getScalarSizeInBits();
const unsigned VectorBitsMin = Subtarget.getRealMinVLen();
const unsigned MinVLMAX = VectorBitsMin / EltSize;
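The hunk above shows only the top of getSmallestLLTForIndex. Below is a rough,
simplified sketch of how such a helper might choose a narrower container type;
the lines visible in the hunk (plus getLMUL1Ty from the hunk header) come from
the patch, while the trailing early-exit logic is an illustrative assumption,
not the patch's actual implementation:

static std::optional<LLT>
getSmallestLLTForIndex(LLT VecTy, unsigned MaxIdx,
                       const RISCVSubtarget &Subtarget) {
  assert(VecTy.isScalableVector() && "Expected scalable vector");
  const unsigned EltSize = VecTy.getScalarSizeInBits();
  const unsigned VectorBitsMin = Subtarget.getRealMinVLen();
  const unsigned MinVLMAX = VectorBitsMin / EltSize;
  // If the index is known to fall within the first MinVLMAX elements, an
  // LMUL1 container is guaranteed to hold it, so the slide/insert can be
  // performed on the smaller type. Otherwise keep the original type.
  if (MaxIdx < MinVLMAX)
    return getLMUL1Ty(VecTy);
  return std::nullopt;
}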