[llvm] [GlobalISel] Introduce `G_POISON` (PR #127825)
Mateusz Sokół via llvm-commits
llvm-commits at lists.llvm.org
Sun Apr 20 13:23:27 PDT 2025
https://github.com/mtsokol updated https://github.com/llvm/llvm-project/pull/127825
>From 0d075956f954ff1349d9309eb5c7f39e57fc7ada Mon Sep 17 00:00:00 2001
From: Mateusz Sokół <mat646 at gmail.com>
Date: Wed, 19 Feb 2025 16:02:55 +0000
Subject: [PATCH 1/5] [GlobalISel] Introduce `G_POISON`
---
llvm/docs/GlobalISel/GenericOpcode.rst | 9 ++++
.../llvm/CodeGen/GlobalISel/CombinerHelper.h | 17 +++++++
.../CodeGen/GlobalISel/GenericMachineInstrs.h | 8 +++
.../CodeGen/GlobalISel/MachineIRBuilder.h | 5 +-
llvm/include/llvm/Support/TargetOpcodes.def | 5 +-
llvm/include/llvm/Target/GenericOpcodes.td | 6 +++
.../include/llvm/Target/GlobalISel/Combine.td | 49 ++++++++++++++++++-
llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp | 3 +-
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 43 +++++++++++++++-
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 2 +
.../GlobalISel/LegacyLegalizerInfo.cpp | 2 +
.../CodeGen/GlobalISel/LegalizerHelper.cpp | 7 ++-
.../GlobalISel/LostDebugLocObserver.cpp | 1 +
.../CodeGen/GlobalISel/MachineIRBuilder.cpp | 4 ++
llvm/lib/CodeGen/GlobalISel/Utils.cpp | 10 +++-
llvm/lib/CodeGen/MachineSSAContext.cpp | 3 +-
.../X86/GlobalISel/calllowering-nocrashret.ll | 15 +++++-
.../GlobalISelEmitter/GlobalISelEmitter.td | 2 +-
18 files changed, 178 insertions(+), 13 deletions(-)
diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index 5291b42d80870..3e80f608a82aa 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -24,6 +24,15 @@ An undefined value.
%0:_(s32) = G_IMPLICIT_DEF
+G_POISON
+^^^^^^^^
+
+A poison value.
+
+.. code-block:: none
+
+ %0:_(s32) = G_POISON
+
G_CONSTANT
^^^^^^^^^^
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index c15263e0b06f8..f40f84aa60ba4 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -432,16 +432,27 @@ class CombinerHelper {
/// G_IMPLICIT_DEF.
bool matchAnyExplicitUseIsUndef(MachineInstr &MI) const;
+ /// Return true if any explicit use operand on \p MI is defined by a
+ /// G_POISON.
+ bool matchAnyExplicitUseIsPoison(MachineInstr &MI) const;
+
/// Return true if all register explicit use operands on \p MI are defined by
/// a G_IMPLICIT_DEF.
bool matchAllExplicitUsesAreUndef(MachineInstr &MI) const;
+ /// Return true if all register explicit use operands on \p MI are defined by
+ /// a G_POISON.
+ bool matchAllExplicitUsesArePoison(MachineInstr &MI) const;
+
/// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
bool matchUndefShuffleVectorMask(MachineInstr &MI) const;
/// Return true if a G_STORE instruction \p MI is storing an undef value.
bool matchUndefStore(MachineInstr &MI) const;
+ /// Return true if a G_STORE instruction \p MI is storing a poison value.
+ bool matchPoisonStore(MachineInstr &MI) const;
+
/// Return true if a G_SELECT instruction \p MI has an undef comparison.
bool matchUndefSelectCmp(MachineInstr &MI) const;
@@ -467,6 +478,9 @@ class CombinerHelper {
/// Replace an instruction with a G_IMPLICIT_DEF.
void replaceInstWithUndef(MachineInstr &MI) const;
+ /// Replace an instruction with a G_POISON.
+ void replaceInstWithPoison(MachineInstr &MI) const;
+
/// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx) const;
@@ -507,6 +521,9 @@ class CombinerHelper {
/// Check if operand \p OpIdx is undef.
bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const;
+ /// Check if operand \p OpIdx is poison.
+ bool matchOperandIsPoison(MachineInstr &MI, unsigned OpIdx) const;
+
/// Check if operand \p OpIdx is known to be a power of 2.
bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
unsigned OpIdx) const;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
index 9e5d4d34f24d2..ddcff441cea3b 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h
@@ -343,6 +343,14 @@ class GImplicitDef : public GenericMachineInstr {
}
};
+/// Represents a G_POISON.
+class GPoison : public GenericMachineInstr {
+public:
+ static bool classof(const MachineInstr *MI) {
+ return MI->getOpcode() == TargetOpcode::G_POISON;
+ }
+};
+
/// Represents a G_SELECT.
class GSelect : public GenericMachineInstr {
public:
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index f9dcbeb370bef..d3d506a0b9b3d 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -1017,9 +1017,12 @@ class MachineIRBuilder {
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index);
- /// Build and insert \p Res = IMPLICIT_DEF.
+ /// Build and insert \p Res = G_IMPLICIT_DEF.
MachineInstrBuilder buildUndef(const DstOp &Res);
+ /// Build and insert \p Res = G_POISON.
+ MachineInstrBuilder buildPoison(const DstOp &Res);
+
/// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
///
/// G_MERGE_VALUES combines the input elements contiguously into a larger
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 5ef3707b81fe9..0170ac327c855 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -295,9 +295,12 @@ HANDLE_TARGET_OPCODE(G_ABDS)
/// Generic absolute difference unsigned instruction.
HANDLE_TARGET_OPCODE(G_ABDU)
-
+/// Generic implicit definition.
HANDLE_TARGET_OPCODE(G_IMPLICIT_DEF)
+/// Generic poison value.
+HANDLE_TARGET_OPCODE(G_POISON)
+
/// Generic PHI instruction with types.
HANDLE_TARGET_OPCODE(G_PHI)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index e134bab61bf63..5b71641854eac 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -92,6 +92,12 @@ def G_IMPLICIT_DEF : GenericInstruction {
let hasSideEffects = false;
}
+def G_POISON : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins);
+ let hasSideEffects = false;
+}
+
def G_PHI : GenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins variable_ops);
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index e5e19a1d93486..258e846e52e5f 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -430,6 +430,12 @@ def binop_right_undef_to_undef: GICombineRule<
[{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
(apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+def binop_right_poison_to_poison
+ : GICombineRule<(defs root:$root),
+ (match(wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
+ [{ return Helper.matchOperandIsPoison(*${root}, 2); }]),
+ (apply [{ Helper.replaceInstWithPoison(*${root}); }])>;
+
def unary_undef_to_zero: GICombineRule<
(defs root:$root),
(match (wip_match_opcode G_ABS):$root,
@@ -447,6 +453,17 @@ def unary_undef_to_undef : GICombineRule<
(match (unary_undef_to_undef_frags $dst)),
(apply [{ Helper.replaceInstWithUndef(*${dst}.getParent()); }])>;
+def unary_poison_to_poison_frags
+ : GICombinePatFrag<(outs root:$dst), (ins),
+ !foreach(op,
+ [G_TRUNC, G_BITCAST, G_ANYEXT, G_PTRTOINT,
+ G_INTTOPTR, G_FPTOSI, G_FPTOUI],
+ (pattern(op $dst, $x), (G_POISON $x)))>;
+def unary_poison_to_poison
+ : GICombineRule<
+ (defs root:$dst), (match(unary_poison_to_poison_frags $dst)),
+ (apply [{ Helper.replaceInstWithPoison(*${dst}.getParent()); }])>;
+
// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
@@ -455,6 +472,15 @@ def propagate_undef_any_op: GICombineRule<
[{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
(apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+// Instructions where if any source operand is poison, the instruction can be
+// replaced with poison.
+def propagate_poison_any_op
+ : GICombineRule<
+ (defs root:$root),
+ (match(wip_match_opcode G_ADD, G_SUB, G_XOR):$root,
+ [{ return Helper.matchAnyExplicitUseIsPoison(*${root}); }]),
+ (apply [{ Helper.replaceInstWithPoison(*${root}); }])>;
+
// Instructions where if all source operands are undef, the instruction can be
// replaced with undef.
def propagate_undef_all_ops: GICombineRule<
@@ -463,6 +489,15 @@ def propagate_undef_all_ops: GICombineRule<
[{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
(apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+// Instructions where if all source operands are poison, the instruction can be
+// replaced with poison.
+def propagate_poison_all_ops
+ : GICombineRule<
+ (defs root:$root),
+ (match(wip_match_opcode G_SHUFFLE_VECTOR, G_BUILD_VECTOR):$root,
+ [{ return Helper.matchAllExplicitUsesArePoison(*${root}); }]),
+ (apply [{ Helper.replaceInstWithPoison(*${root}); }])>;
+
// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
(defs root:$root),
@@ -654,6 +689,13 @@ def erase_undef_store : GICombineRule<
(apply [{ Helper.eraseInst(*${root}); }])
>;
+// Erase stores of poison values.
+def erase_poison_store
+ : GICombineRule<(defs root:$root),
+ (match(wip_match_opcode G_STORE):$root,
+ [{ return Helper.matchPoisonStore(*${root}); }]),
+ (apply [{ Helper.eraseInst(*${root}); }])>;
+
def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
(defs root:$root, simplify_add_to_sub_matchinfo:$info),
@@ -1981,6 +2023,11 @@ def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
erase_undef_store,
insert_extract_vec_elt_out_of_bounds]>;
+def poison_combines
+ : GICombineGroup<[binop_right_poison_to_poison, unary_poison_to_poison,
+ propagate_poison_any_op, propagate_poison_all_ops,
+ erase_poison_store]>;
+
def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
binop_same_val, binop_left_to_zero,
binop_right_to_zero, p2i_to_i2p,
@@ -2036,7 +2083,7 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
vector_ops_combines, freeze_combines, cast_combines,
insert_vec_elt_combines, extract_vec_elt_combines, combines_for_extload,
combine_extracted_vector_load,
- undef_combines, identity_combines, phi_combines,
+ undef_combines, poison_combines, identity_combines, phi_combines,
simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big,
reassocs, ptr_add_immed_chain, cmp_combines,
shl_ashr_to_sext_inreg, neg_and_one_to_sext_inreg, sext_inreg_of_load,
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
index fd89e40a5a1ee..71dd850a7142b 100644
--- a/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CSEInfo.cpp
@@ -54,6 +54,7 @@ bool CSEConfigFull::shouldCSEOpc(unsigned Opc) {
case TargetOpcode::G_CONSTANT:
case TargetOpcode::G_FCONSTANT:
case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_ZEXT:
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ANYEXT:
@@ -82,7 +83,7 @@ bool CSEConfigFull::shouldCSEOpc(unsigned Opc) {
bool CSEConfigConstantOnly::shouldCSEOpc(unsigned Opc) {
return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT ||
- Opc == TargetOpcode::G_IMPLICIT_DEF;
+ Opc == TargetOpcode::G_IMPLICIT_DEF || Opc == TargetOpcode::G_POISON;
}
std::unique_ptr<CSEConfigBase>
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 333f0c17bacc5..610d3dd5f95c1 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -330,6 +330,7 @@ bool CombinerHelper::matchCombineConcatVectors(
for (const MachineOperand &BuildVecMO : Def->uses())
Ops.push_back(BuildVecMO.getReg());
break;
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF: {
LLT OpType = MRI.getType(Reg);
// Keep one undef value for all the undef operands.
@@ -2736,6 +2737,12 @@ bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) const {
});
}
+bool CombinerHelper::matchAnyExplicitUseIsPoison(MachineInstr &MI) const {
+ return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
+ return MO.isReg() && getOpcodeDef(TargetOpcode::G_POISON, MO.getReg(), MRI);
+ });
+}
+
bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) const {
return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
return !MO.isReg() ||
@@ -2743,6 +2750,13 @@ bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) const {
});
}
+bool CombinerHelper::matchAllExplicitUsesArePoison(MachineInstr &MI) const {
+ return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
+ return !MO.isReg() ||
+ getOpcodeDef(TargetOpcode::G_POISON, MO.getReg(), MRI);
+ });
+}
+
bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
@@ -2755,6 +2769,11 @@ bool CombinerHelper::matchUndefStore(MachineInstr &MI) const {
MRI);
}
+bool CombinerHelper::matchPoisonStore(MachineInstr &MI) const {
+ assert(MI.getOpcode() == TargetOpcode::G_STORE);
+ return getOpcodeDef(TargetOpcode::G_POISON, MI.getOperand(0).getReg(), MRI);
+}
+
bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SELECT);
return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
@@ -2994,6 +3013,12 @@ bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI,
getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
}
+bool CombinerHelper::matchOperandIsPoison(MachineInstr &MI,
+ unsigned OpIdx) const {
+ MachineOperand &MO = MI.getOperand(OpIdx);
+ return MO.isReg() && getOpcodeDef(TargetOpcode::G_POISON, MO.getReg(), MRI);
+}
+
bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
unsigned OpIdx) const {
MachineOperand &MO = MI.getOperand(OpIdx);
@@ -3033,6 +3058,12 @@ void CombinerHelper::replaceInstWithUndef(MachineInstr &MI) const {
MI.eraseFromParent();
}
+void CombinerHelper::replaceInstWithPoison(MachineInstr &MI) const {
+ assert(MI.getNumDefs() == 1 && "Expected only one def?");
+ Builder.buildPoison(MI.getOperand(0));
+ MI.eraseFromParent();
+}
+
bool CombinerHelper::matchSimplifyAddToSub(
MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) const {
Register LHS = MI.getOperand(1).getReg();
@@ -3097,6 +3128,7 @@ bool CombinerHelper::matchCombineInsertVecElts(
// If we didn't end in a G_IMPLICIT_DEF and the source is not fully
// overwritten, bail out.
return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ TmpInst->getOpcode() == TargetOpcode::G_POISON ||
all_of(MatchInfo, [](Register Reg) { return !!Reg; });
}
@@ -3467,12 +3499,13 @@ bool CombinerHelper::matchUseVectorTruncate(MachineInstr &MI,
if (I < 2)
return false;
- // Check the remaining source elements are only G_IMPLICIT_DEF
+ // Check the remaining source elements are only G_IMPLICIT_DEF or G_POISON
for (; I < NumOperands; ++I) {
auto SrcMI = MRI.getVRegDef(BuildMI->getSourceReg(I));
auto SrcMIOpc = SrcMI->getOpcode();
- if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF)
+ if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF &&
+ SrcMIOpc != TargetOpcode::G_POISON)
return false;
}
@@ -7933,6 +7966,12 @@ bool CombinerHelper::matchShuffleDisjointMask(MachineInstr &MI,
if (getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, Shuffle.getSrc2Reg(), MRI))
return false;
+ if (getOpcodeDef(TargetOpcode::G_POISON, Shuffle.getSrc1Reg(), MRI))
+ return false;
+
+ if (getOpcodeDef(TargetOpcode::G_POISON, Shuffle.getSrc2Reg(), MRI))
+ return false;
+
const LLT DstTy = MRI.getType(Shuffle.getReg(0));
const LLT Src1Ty = MRI.getType(Shuffle.getSrc1Reg());
if (!isLegalOrBeforeLegalizer(
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 931e4fe19e69a..7343360c9b808 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3649,6 +3649,8 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
EntryBuilder->buildConstant(Reg, *CI);
else if (auto CF = dyn_cast<ConstantFP>(&C))
EntryBuilder->buildFConstant(Reg, *CF);
+ else if (isa<PoisonValue>(C))
+ EntryBuilder->buildPoison(Reg);
else if (isa<UndefValue>(C))
EntryBuilder->buildUndef(Reg);
else if (isa<ConstantPointerNull>(C))
diff --git a/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
index 05923e5fc97cc..412f2373ac471 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegacyLegalizerInfo.cpp
@@ -82,6 +82,8 @@ LegacyLegalizerInfo::LegacyLegalizerInfo() {
setLegalizeScalarToDifferentSizeStrategy(
TargetOpcode::G_IMPLICIT_DEF, 0, narrowToSmallerAndUnsupportedIfTooSmall);
+ setLegalizeScalarToDifferentSizeStrategy(
+ TargetOpcode::G_POISON, 0, narrowToSmallerAndUnsupportedIfTooSmall);
setLegalizeScalarToDifferentSizeStrategy(
TargetOpcode::G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
setLegalizeScalarToDifferentSizeStrategy(
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 0aa853389bf1a..58e5769a0a055 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1425,6 +1425,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF: {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
@@ -3082,6 +3083,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF: {
Observer.changingInstr(MI);
widenScalarDst(MI, WideTy);
@@ -5307,6 +5309,7 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
switch (MI.getOpcode()) {
case G_IMPLICIT_DEF:
+ case G_POISON:
case G_TRUNC:
case G_AND:
case G_OR:
@@ -6047,6 +6050,7 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
LLT MoreTy) {
unsigned Opc = MI.getOpcode();
switch (Opc) {
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF:
case TargetOpcode::G_LOAD: {
if (TypeIdx != 0)
@@ -8451,7 +8455,8 @@ LegalizerHelper::lowerVECTOR_COMPRESS(llvm::MachineInstr &MI) {
auto OutPos = MIRBuilder.buildConstant(IdxTy, 0);
bool HasPassthru =
- MRI.getVRegDef(Passthru)->getOpcode() != TargetOpcode::G_IMPLICIT_DEF;
+ MRI.getVRegDef(Passthru)->getOpcode() != TargetOpcode::G_IMPLICIT_DEF &&
+ MRI.getVRegDef(Passthru)->getOpcode() != TargetOpcode::G_POISON;
if (HasPassthru)
MIRBuilder.buildStore(Passthru, StackPtr, PtrInfo, VecAlign);
diff --git a/llvm/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp b/llvm/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp
index 6d606e5550f1a..8c19dd39a4aa8 100644
--- a/llvm/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LostDebugLocObserver.cpp
@@ -85,6 +85,7 @@ static bool irTranslatorNeverAddsLocations(unsigned Opcode) {
case TargetOpcode::G_CONSTANT:
case TargetOpcode::G_FCONSTANT:
case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_GLOBAL_VALUE:
return true;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 359677027f52f..d1af81a80f998 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -642,6 +642,10 @@ MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}
+MachineInstrBuilder MachineIRBuilder::buildPoison(const DstOp &Res) {
+ return buildInstr(TargetOpcode::G_POISON, {Res}, {});
+}
+
MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 223d69c362185..80b3c94c51064 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1470,6 +1470,7 @@ static bool isConstantScalar(const MachineInstr &MI,
switch (MI.getOpcode()) {
case TargetOpcode::G_CONSTANT:
case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON:
return true;
case TargetOpcode::G_FCONSTANT:
return AllowFP;
@@ -1547,6 +1548,7 @@ llvm::isConstantOrConstantSplatVectorFP(MachineInstr &MI,
bool llvm::isNullOrNullSplat(const MachineInstr &MI,
const MachineRegisterInfo &MRI, bool AllowUndefs) {
switch (MI.getOpcode()) {
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF:
return AllowUndefs;
case TargetOpcode::G_CONSTANT:
@@ -1566,6 +1568,7 @@ bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
const MachineRegisterInfo &MRI,
bool AllowUndefs) {
switch (MI.getOpcode()) {
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF:
return AllowUndefs;
case TargetOpcode::G_CONSTANT:
@@ -1582,7 +1585,8 @@ bool llvm::matchUnaryPredicate(
std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
- if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
+ if (AllowUndefs && (Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ Def->getOpcode() == TargetOpcode::G_POISON))
return Match(nullptr);
// TODO: Also handle fconstant
@@ -1595,7 +1599,8 @@ bool llvm::matchUnaryPredicate(
for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
Register SrcElt = Def->getOperand(I).getReg();
const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
- if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
+    if (AllowUndefs && (SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+                        SrcDef->getOpcode() == TargetOpcode::G_POISON)) {
if (!Match(nullptr))
return false;
continue;
@@ -1914,6 +1919,7 @@ static bool isGuaranteedNotToBeUndefOrPoison(Register Reg,
switch (RegDef->getOpcode()) {
case TargetOpcode::G_FREEZE:
return true;
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF:
return !includesUndef(Kind);
case TargetOpcode::G_CONSTANT:
diff --git a/llvm/lib/CodeGen/MachineSSAContext.cpp b/llvm/lib/CodeGen/MachineSSAContext.cpp
index bbbfb3ce2788d..a1ab9ffc191ac 100644
--- a/llvm/lib/CodeGen/MachineSSAContext.cpp
+++ b/llvm/lib/CodeGen/MachineSSAContext.cpp
@@ -56,7 +56,8 @@ const MachineBasicBlock *MachineSSAContext::getDefBlock(Register value) const {
static bool isUndef(const MachineInstr &MI) {
return MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
- MI.getOpcode() == TargetOpcode::IMPLICIT_DEF;
+ MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
+ MI.getOpcode() == TargetOpcode::G_POISON;
}
/// MachineInstr equivalent of PHINode::hasConstantOrUndefValue() for G_PHI.
diff --git a/llvm/test/CodeGen/X86/GlobalISel/calllowering-nocrashret.ll b/llvm/test/CodeGen/X86/GlobalISel/calllowering-nocrashret.ll
index 5e9311559b400..74363203f8fca 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/calllowering-nocrashret.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/calllowering-nocrashret.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu -o - -global-isel %s -stop-after=irtranslator | FileCheck %s
-define <4 x i1> @foo() {
- ; CHECK-LABEL: name: foo
+define <4 x i1> @foo_undef() {
+ ; CHECK-LABEL: name: foo_undef
; CHECK: bb.1.entry:
; CHECK: [[DEF:%[0-9]+]]:_(<4 x s1>) = G_IMPLICIT_DEF
; CHECK: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[DEF]](<4 x s1>)
@@ -11,3 +11,14 @@ define <4 x i1> @foo() {
entry:
ret <4 x i1> undef ;
}
+
+define <4 x i1> @foo_poison() {
+ ; CHECK-LABEL: name: foo_poison
+ ; CHECK: bb.1.entry:
+ ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s1>) = G_POISON
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[DEF]](<4 x s1>)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](<4 x s32>)
+ ; CHECK: RET 0, implicit $xmm0
+entry:
+ ret <4 x i1> poison ;
+}
diff --git a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
index b7132bf2bcd8c..d1c0e5d9d3105 100644
--- a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
+++ b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td
@@ -513,7 +513,7 @@ def : Pat<(frag GPR32:$src1, complex:$src2, complex:$src3),
// R00O-NEXT: GIM_Reject,
// R00O: // Label [[DEFAULT_NUM]]: @[[DEFAULT]]
// R00O-NEXT: GIM_Reject,
-// R00O-NEXT: }; // Size: 1840 bytes
+// R00O-NEXT: }; // Size: 1844 bytes
def INSNBOB : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3, GPR32:$src4),
[(set GPR32:$dst,
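
For context on the combine changes above, here is a minimal before/after MIR sketch of what the new poison_combines group is meant to do; the virtual registers and types below are illustrative only and are not taken from the in-tree tests. propagate_poison_any_op folds an operation with a poison input into G_POISON, and erase_poison_store drops a store whose value operand is poison:

  ; before combining (illustrative)
  %v:_(s32) = G_POISON
  %sum:_(s32) = G_ADD %x, %v
  G_STORE %v(s32), %p(p0) :: (store (s32))

  ; after combining (illustrative)
  %sum:_(s32) = G_POISON
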
>From a8569f2498b70e7475b963a145abc4100f9a7788 Mon Sep 17 00:00:00 2001
From: Mateusz Sokół <mat646 at gmail.com>
Date: Sun, 20 Apr 2025 17:14:44 +0000
Subject: [PATCH 2/5] Adjust targets for `G_POISON`
---
.../AArch64/GISel/AArch64InstructionSelector.cpp | 12 +++++++++---
.../Target/AArch64/GISel/AArch64LegalizerInfo.cpp | 2 +-
.../AArch64/GISel/AArch64PostLegalizerLowering.cpp | 5 +++--
llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp | 1 +
llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 3 ++-
.../Target/RISCV/GISel/RISCVInstructionSelector.cpp | 4 +++-
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 5 +++--
.../lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 3 ++-
llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp | 7 +++++--
llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp | 3 ++-
llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp | 3 ++-
llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp | 12 ++++++++----
llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp | 2 +-
llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp | 1 +
14 files changed, 43 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 2afd24555b28c..2daf3ba392cde 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -3594,7 +3594,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return selectIntrinsic(I, MRI);
case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
return selectIntrinsicWithSideEffects(I, MRI);
- case TargetOpcode::G_IMPLICIT_DEF: {
+ case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON: {
I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
const Register DstReg = I.getOperand(0).getReg();
@@ -5860,8 +5861,13 @@ bool AArch64InstructionSelector::tryOptBuildVecToSubregToReg(
const RegisterBank &DstRB = *RBI.getRegBank(Dst, MRI, TRI);
if (EltRB != DstRB)
return false;
- if (any_of(drop_begin(I.operands(), 2), [&MRI](const MachineOperand &Op) {
- return !getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, Op.getReg(), MRI);
+ if (any_of(drop_begin(I.operands(), 2),
+ [&MRI](const MachineOperand &Op) {
+ return !getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, Op.getReg(),
+ MRI);
+ }) &&
+ any_of(drop_begin(I.operands(), 2), [&MRI](const MachineOperand &Op) {
+ return !getOpcodeDef(TargetOpcode::G_POISON, Op.getReg(), MRI);
}))
return false;
unsigned SubReg;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index c36b20badfc09..5248e535f6f0e 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -92,7 +92,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
const bool HasSVE = ST.hasSVE();
getActionDefinitionsBuilder(
- {G_IMPLICIT_DEF, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
+ {G_IMPLICIT_DEF, G_POISON, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
.legalFor({p0, s8, s16, s32, s64})
.legalFor({v2s8, v4s8, v8s8, v16s8, v2s16, v4s16, v8s16, v2s32, v4s32,
v2s64, v2p0})
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 4785c7b68d94d..9b3dd18db8810 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -266,9 +266,10 @@ bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
MI.getOperand(1).getReg(), MRI);
if (!InsMI)
return false;
- // Match the undef vector operand.
+ // Match the undef/poison vector operand.
if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
- MRI))
+ MRI) &&
+ !getOpcodeDef(TargetOpcode::G_POISON, InsMI->getOperand(1).getReg(), MRI))
return false;
// Match the index constant 0.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 6ef7505ec6f62..14d264fb6caa0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -4047,6 +4047,7 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_BUILD_VECTOR_TRUNC:
return selectG_BUILD_VECTOR(I);
case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON:
return selectG_IMPLICIT_DEF(I);
case TargetOpcode::G_INSERT:
return selectG_INSERT(I);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 1d0e81db5a5db..61d8e845fce10 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -477,7 +477,8 @@ AMDGPURegisterBankInfo::getInstrAlternativeMappings(
InstructionMappings AltMappings;
switch (MI.getOpcode()) {
case TargetOpcode::G_CONSTANT:
- case TargetOpcode::G_IMPLICIT_DEF: {
+ case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON: {
unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
if (Size == 1) {
static const OpRegBankEntry<1> Table[3] = {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index f83c2b6da8923..e548c63fc28d5 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -812,6 +812,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return true;
}
case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON:
return selectImplicitDef(MI, MIB);
case TargetOpcode::G_UNMERGE_VALUES:
return selectUnmergeValues(MI, MIB);
@@ -1030,7 +1031,8 @@ bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
MachineIRBuilder &MIB) const {
- assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
+ assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ MI.getOpcode() == TargetOpcode::G_POISON);
const Register DstReg = MI.getOperand(0).getReg();
const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index e88f33d6859ec..db017c082fce7 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -255,7 +255,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
// TODO: transform illegal vector types into legal vector type
// TODO: Merge with G_FREEZE?
getActionDefinitionsBuilder(
- {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER})
+ {G_IMPLICIT_DEF, G_POISON, G_CONSTANT_FOLD_BARRIER})
.legalFor({s32, sXLen, p0})
.legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
.legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
@@ -1172,7 +1172,8 @@ bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
LLT LitTy = MRI.getType(LitVec);
if (Idx == 0 ||
- MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
+ MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_POISON)
return true;
// We don't have the ability to slide mask vectors up indexed by their i1
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index a082b18867666..280ac780da458 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -288,7 +288,8 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case TargetOpcode::G_ZEXTLOAD:
return getInstructionMapping(DefaultMappingID, /*Cost=*/1, GPRValueMapping,
NumOperands);
- case TargetOpcode::G_IMPLICIT_DEF: {
+ case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON: {
Register Dst = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(Dst);
unsigned DstMinSize = DstTy.getSizeInBits().getKnownMinValue();
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 79f6b43f3aded..1dfb2bc6f407e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -548,7 +548,8 @@ bool SPIRVInstructionSelector::select(MachineInstr &I) {
Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
- I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
+ I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ I.getOpcode() == TargetOpcode::G_POISON);
if (spvSelect(ResVReg, ResType, I)) {
if (HasDefs) // Make all vregs 64 bits (for SPIR-V IDs).
for (unsigned i = 0; i < I.getNumDefs(); ++i)
@@ -598,6 +599,7 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
case TargetOpcode::G_GLOBAL_VALUE:
return selectGlobalValue(ResVReg, I);
case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON:
return selectOpUndef(ResVReg, ResType, I);
case TargetOpcode::G_FREEZE:
return selectFreeze(ResVReg, ResType, I);
@@ -2326,7 +2328,8 @@ bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
case SPIRV::ASSIGN_TYPE:
if (MachineInstr *AssignToDef =
MRI->getVRegDef(Def->getOperand(1).getReg())) {
- if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
+ if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ AssignToDef->getOpcode() == TargetOpcode::G_POISON)
Reg = Def->getOperand(2).getReg();
}
break;
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index 578e82881f6e8..906a16ffcdf49 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -222,7 +222,8 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
all(typeInSet(0, allPtrsScalarsAndVectors),
typeInSet(1, allPtrsScalarsAndVectors)));
- getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE}).alwaysLegal();
+ getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_POISON, G_FREEZE})
+ .alwaysLegal();
getActionDefinitionsBuilder({G_STACKSAVE, G_STACKRESTORE}).alwaysLegal();
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index 3fcff3dd8f553..e288803eebf23 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -93,7 +93,8 @@ addConstantsToTrack(MachineFunction &MF, SPIRVGlobalRegistry *GR,
if (SrcMI)
GR->add(Const, SrcMI);
if (SrcMI && (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT ||
- SrcMI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF))
+ SrcMI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ SrcMI->getOpcode() == TargetOpcode::G_POISON))
TargetExtConstTypes[SrcMI] = Const->getType();
if (Const->isNullValue()) {
MachineBasicBlock &DepMBB = MF.front();
diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
index 64a1fa1780a77..3660303e7cc47 100644
--- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
@@ -113,7 +113,8 @@ class X86InstructionSelector : public InstructionSelector {
const TargetRegisterClass *SrcRC) const;
bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
- bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
+ bool selectImplicitDefOrPoisonOrPHI(MachineInstr &I,
+ MachineRegisterInfo &MRI) const;
bool selectMulDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectSelect(MachineInstr &I, MachineRegisterInfo &MRI,
@@ -428,8 +429,9 @@ bool X86InstructionSelector::select(MachineInstr &I) {
case TargetOpcode::G_BRCOND:
return selectCondBranch(I, MRI, MF);
case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_PHI:
- return selectImplicitDefOrPHI(I, MRI);
+ return selectImplicitDefOrPoisonOrPHI(I, MRI);
case TargetOpcode::G_MUL:
case TargetOpcode::G_SMULH:
case TargetOpcode::G_UMULH:
@@ -1585,9 +1587,10 @@ bool X86InstructionSelector::materializeFP(MachineInstr &I,
return true;
}
-bool X86InstructionSelector::selectImplicitDefOrPHI(
+bool X86InstructionSelector::selectImplicitDefOrPoisonOrPHI(
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ I.getOpcode() == TargetOpcode::G_POISON ||
I.getOpcode() == TargetOpcode::G_PHI) &&
"unexpected instruction");
@@ -1604,7 +1607,8 @@ bool X86InstructionSelector::selectImplicitDefOrPHI(
}
}
- if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
+ if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ I.getOpcode() == TargetOpcode::G_POISON)
I.setDesc(TII.get(X86::IMPLICIT_DEF));
else
I.setDesc(TII.get(X86::PHI));
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index ba9fa254a477a..560e296773173 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -82,7 +82,7 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
// todo: AVX512 bool vector predicate types
// implicit/constants
- getActionDefinitionsBuilder(G_IMPLICIT_DEF)
+ getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_POISON})
.legalIf([=](const LegalityQuery &Query) -> bool {
// 32/64-bits needs support for s64/s128 to handle cases:
// s64 = EXTEND (G_IMPLICIT_DEF s32) -> s64 = G_IMPLICIT_DEF
diff --git a/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp b/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp
index 0baca81494694..cde06fe1f164e 100644
--- a/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp
@@ -404,6 +404,7 @@ X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE:
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF: {
// we going to try to map 32/64/80 bit to PMI_FP32/PMI_FP64/PMI_FP80
unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
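
The target changes in this patch follow one pattern: wherever a selector, legalizer rule, or register-bank mapping already special-cases G_IMPLICIT_DEF, G_POISON is listed next to it, and at selection time it lowers to the same target IMPLICIT_DEF. A rough sketch of the intended selection (the gpr32 register class below is a placeholder, not taken from a specific test):

  ; before instruction selection (illustrative)
  %0:_(s32) = G_POISON

  ; after selection, same lowering as G_IMPLICIT_DEF (illustrative)
  %0:gpr32 = IMPLICIT_DEF
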
>From 59a800d8a3698f70cb887bdf25281667b0af8001 Mon Sep 17 00:00:00 2001
From: Mateusz Sokół <mat646 at gmail.com>
Date: Sun, 20 Apr 2025 19:00:47 +0000
Subject: [PATCH 3/5] First patch for failing tests
---
.../AMDGPU/AMDGPUInstructionSelector.cpp | 4 +-
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 2 +-
.../Target/AMDGPU/AMDGPURegBankLegalize.cpp | 2 +-
.../AMDGPU/AMDGPURegBankLegalizeRules.cpp | 6 +-
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 8 +-
.../GlobalISel/arm64-irtranslator-gep.ll | 4 +-
.../GlobalISel/legalizer-info-validation.mir | 4 +
.../AArch64/GlobalISel/ret-vec-promote.ll | 22 +-
...calling-conv-ilp32-ilp32f-ilp32d-common.ll | 4 +-
.../calling-conv-lp64-lp64f-lp64d-common.ll | 4 +-
.../GlobalISel/irtranslator/insertelement.ll | 300 +++++++++---------
.../GlobalISel/legalizer-info-validation.mir | 4 +
12 files changed, 189 insertions(+), 175 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 14d264fb6caa0..868529700a486 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -785,7 +785,8 @@ bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const {
// TODO: This should probably be a combine somewhere
// (build_vector $src0, undef) -> copy $src0
MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
- if (Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
+ if (Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
+ Src1Def->getOpcode() == AMDGPU::G_POISON) {
MI.setDesc(TII.get(AMDGPU::COPY));
MI.removeOperand(2);
const auto &RC =
@@ -4955,6 +4956,7 @@ AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
// FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
// drop this.
if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
+ AddrDef->MI->getOpcode() == AMDGPU::G_POISON ||
AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
return std::nullopt;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index beb6432170970..d26ea200c0a57 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -901,7 +901,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.legalFor({S32, S64, S16})
.clampScalar(0, S16, S64);
- getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE})
+ getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_POISON, G_FREEZE})
.legalIf(isRegisterClassType(ST, 0))
// s1 and s16 are special cases because they have legal operations on
// them, but don't really occupy registers in the normal way.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp
index ad6a0772fe8b6..bd5080b2c1400 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp
@@ -316,7 +316,7 @@ bool AMDGPURegBankLegalize::runOnMachineFunction(MachineFunction &MF) {
}
if ((Opc == AMDGPU::G_CONSTANT || Opc == AMDGPU::G_FCONSTANT ||
- Opc == AMDGPU::G_IMPLICIT_DEF)) {
+ Opc == AMDGPU::G_IMPLICIT_DEF || Opc == AMDGPU::G_POISON)) {
Register Dst = MI->getOperand(0).getReg();
// Non S1 types are trivially accepted.
if (MRI.getType(Dst) != LLT::scalar(1)) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index 6ee15709d2fa6..7ebaed385d02d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -450,9 +450,11 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
.Uni(S64, {{Sgpr64}, {Sgpr64, Sgpr32}})
.Div(S64, {{Vgpr64}, {Vgpr64, Vgpr32}});
- // Note: we only write S1 rules for G_IMPLICIT_DEF, G_CONSTANT, G_FCONSTANT
+ // Note: we only write S1 rules for G_IMPLICIT_DEF, G_POISON,
+ // G_CONSTANT, G_FCONSTANT
// and G_FREEZE here, rest is trivially regbankselected earlier
- addRulesForGOpcs({G_IMPLICIT_DEF}).Any({{UniS1}, {{Sgpr32Trunc}, {}}});
+ addRulesForGOpcs({G_IMPLICIT_DEF, G_POISON})
+ .Any({{UniS1}, {{Sgpr32Trunc}, {}}});
addRulesForGOpcs({G_CONSTANT})
.Any({{UniS1, _}, {{Sgpr32Trunc}, {None}, UniCstExt}});
addRulesForGOpcs({G_FREEZE}).Any({{DivS1}, {{Vcc}, {Vcc}}});
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 61d8e845fce10..e3b930bb50c22 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2211,7 +2211,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
MachineRegisterInfo &MRI = OpdMapper.getMRI();
switch (Opc) {
case AMDGPU::G_CONSTANT:
- case AMDGPU::G_IMPLICIT_DEF: {
+ case AMDGPU::G_IMPLICIT_DEF:
+ case AMDGPU::G_POISON: {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy != LLT::scalar(1))
@@ -2231,7 +2232,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
LLVMContext &Ctx = B.getMF().getFunction().getContext();
MI.getOperand(0).setReg(NewDstReg);
- if (Opc != AMDGPU::G_IMPLICIT_DEF) {
+ if (Opc != AMDGPU::G_IMPLICIT_DEF && Opc != AMDGPU::G_POISON) {
uint64_t ConstVal = MI.getOperand(1).getCImm()->getZExtValue();
MI.getOperand(1).setCImm(
ConstantInt::get(IntegerType::getInt32Ty(Ctx), ConstVal));
@@ -4124,7 +4125,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64);
break;
}
- case AMDGPU::G_IMPLICIT_DEF: {
+ case AMDGPU::G_IMPLICIT_DEF:
+ case AMDGPU::G_POISON: {
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
break;
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
index f0d9aa4dcd25d..fabdd4e2262cf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll
@@ -59,7 +59,7 @@ define void @ossfuzz65052() {
; O0: bb.1 (%ir-block.0):
; O0-NEXT: successors: %bb.2(0x80000000)
; O0-NEXT: {{ $}}
- ; O0-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+ ; O0-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_POISON
; O0-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -170141183460469231731687303715884105728
; O0-NEXT: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[C]](s128)
; O0-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
@@ -77,7 +77,7 @@ define void @ossfuzz65052() {
; O3: bb.1 (%ir-block.0):
; O3-NEXT: successors: %bb.2(0x80000000)
; O3-NEXT: {{ $}}
- ; O3-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+ ; O3-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_POISON
; O3-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -170141183460469231731687303715884105728
; O3-NEXT: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[C]](s128)
; O3-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index 0260e65520774..273a4ba8d8bf6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -82,6 +82,10 @@
# DEBUG-NEXT: .. the first uncovered type index: {{[0-9]+}}, OK
# DEBUG-NEXT: .. the first uncovered imm index: {{[0-9]+}}, OK
#
+# DEBUG-NEXT: G_POISON (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. the first uncovered type index: {{[0-9]+}}, OK
+# DEBUG-NEXT: .. the first uncovered imm index: {{[0-9]+}}, OK
+#
# DEBUG-NEXT: G_PHI (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll b/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
index 04734caf01ef7..d0a413e2f2f28 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
@@ -41,7 +41,7 @@ define <2 x i12> @ret_v2i12(i12 %v1, i12 %v2) {
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s12) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s12) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s12>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s12>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<2 x s12>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s12), [[C]](s64)
@@ -63,7 +63,7 @@ define <3 x i12> @ret_v3i12(i12 %v1, i12 %v2) {
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s12) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s12) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<3 x s12>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<3 x s12>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -93,7 +93,7 @@ define <4 x i12> @ret_v4i12(i12 %v1, i12 %v2) {
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s12) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s12) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s12>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s12>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -121,7 +121,7 @@ define <5 x i12> @ret_v5i12(i12 %v1, i12 %v2) {
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s12) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s12) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<5 x s12>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<5 x s12>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -161,7 +161,7 @@ define <6 x i12> @ret_v6i12(i12 %v1, i12 %v2) {
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s12) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s12) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<6 x s12>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<6 x s12>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -206,7 +206,7 @@ define <7 x i12> @ret_v7i12(i12 %v1, i12 %v2) {
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s12) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s12) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<7 x s12>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<7 x s12>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -256,7 +256,7 @@ define <8 x i12> @ret_v8i12(i12 %v1, i12 %v2) {
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s12) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s12) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s12>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s12>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -297,7 +297,7 @@ define <12 x i12> @ret_v12i12(i12 %v1, i12 %v2) {
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s12) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s12) = G_TRUNC [[COPY2]](s32)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<12 x s12>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<12 x s12>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -375,7 +375,7 @@ define <2 x i100> @ret_v2i100(i100 %v1, i100 %v2) {
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s100) = G_TRUNC [[MV1]](s128)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s100>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s100>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<2 x s100>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s100), [[C]](s64)
@@ -405,7 +405,7 @@ define <3 x i100> @ret_v3i100(i100 %v1, i100 %v2) {
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s100) = G_TRUNC [[MV1]](s128)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<3 x s100>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<3 x s100>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@@ -440,7 +440,7 @@ define <4 x i100> @ret_v4i100(i100 %v1, i100 %v2) {
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s100) = G_TRUNC [[MV1]](s128)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s100>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s100>) = G_POISON
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 3fcaa81e1a552..1b7fb21bba70a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -1383,9 +1383,9 @@ define %struct.large2 @callee_large_struct_ret2() nounwind {
; RV32I-NEXT: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32I-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_POISON
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
- ; RV32I-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_POISON
; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
index 17c6e55fa8d2c..5f1c0ad514acd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -1156,8 +1156,8 @@ define %struct.large2 @callee_large_struct_ret2() nounwind {
; RV64I-NEXT: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64I-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
- ; RV64I-NEXT: [[DEF1:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_POISON
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:_(s128) = G_POISON
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[DEF]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
index a1347d2306cad..10d753ff033d4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
@@ -7,7 +7,7 @@
define <vscale x 1 x i1> @insertelement_nxv1i1_0() {
; RV32-LABEL: name: insertelement_nxv1i1_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -16,7 +16,7 @@ define <vscale x 1 x i1> @insertelement_nxv1i1_0() {
;
; RV64-LABEL: name: insertelement_nxv1i1_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -29,7 +29,7 @@ define <vscale x 1 x i1> @insertelement_nxv1i1_0() {
define <vscale x 1 x i1> @insertelement_nxv1i1_1() {
; RV32-LABEL: name: insertelement_nxv1i1_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -38,7 +38,7 @@ define <vscale x 1 x i1> @insertelement_nxv1i1_1() {
;
; RV64-LABEL: name: insertelement_nxv1i1_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -56,7 +56,7 @@ define <vscale x 1 x i1> @insertelement_nxv1i1_2(i1 %x, i32 %idx) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_POISON
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
@@ -69,7 +69,7 @@ define <vscale x 1 x i1> @insertelement_nxv1i1_2(i1 %x, i32 %idx) {
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_POISON
; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s32)
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[ZEXT]](s64)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
@@ -81,7 +81,7 @@ define <vscale x 1 x i1> @insertelement_nxv1i1_2(i1 %x, i32 %idx) {
define <vscale x 2 x i1> @insertelement_nxv2i1_0() {
; RV32-LABEL: name: insertelement_nxv2i1_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -90,7 +90,7 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_0() {
;
; RV64-LABEL: name: insertelement_nxv2i1_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -103,7 +103,7 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_0() {
define <vscale x 2 x i1> @insertelement_nxv2i1_1() {
; RV32-LABEL: name: insertelement_nxv2i1_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -112,7 +112,7 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_1() {
;
; RV64-LABEL: name: insertelement_nxv2i1_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -130,7 +130,7 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_2(i1 %x, i32 %idx) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_POISON
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
@@ -143,7 +143,7 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_2(i1 %x, i32 %idx) {
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_POISON
; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s32)
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[ZEXT]](s64)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
@@ -155,7 +155,7 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_2(i1 %x, i32 %idx) {
define <vscale x 4 x i1> @insertelement_nxv4i1_0() {
; RV32-LABEL: name: insertelement_nxv4i1_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -164,7 +164,7 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_0() {
;
; RV64-LABEL: name: insertelement_nxv4i1_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -177,7 +177,7 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_0() {
define <vscale x 4 x i1> @insertelement_nxv4i1_1() {
; RV32-LABEL: name: insertelement_nxv4i1_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -186,7 +186,7 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_1() {
;
; RV64-LABEL: name: insertelement_nxv4i1_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -203,7 +203,7 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_2(i1 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
@@ -215,7 +215,7 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_2(i1 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s64)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
@@ -227,7 +227,7 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_2(i1 %x) {
define <vscale x 8 x i1> @insertelement_nxv8i1_0() {
; RV32-LABEL: name: insertelement_nxv8i1_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -236,7 +236,7 @@ define <vscale x 8 x i1> @insertelement_nxv8i1_0() {
;
; RV64-LABEL: name: insertelement_nxv8i1_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -249,7 +249,7 @@ define <vscale x 8 x i1> @insertelement_nxv8i1_0() {
define <vscale x 8 x i1> @insertelement_nxv8i1_1() {
; RV32-LABEL: name: insertelement_nxv8i1_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -258,7 +258,7 @@ define <vscale x 8 x i1> @insertelement_nxv8i1_1() {
;
; RV64-LABEL: name: insertelement_nxv8i1_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -276,7 +276,7 @@ define <vscale x 8 x i1> @insertelement_nxv8i1_2(i1 %x, i32 %idx) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_POISON
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
@@ -289,7 +289,7 @@ define <vscale x 8 x i1> @insertelement_nxv8i1_2(i1 %x, i32 %idx) {
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_POISON
; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s32)
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[ZEXT]](s64)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
@@ -301,7 +301,7 @@ define <vscale x 8 x i1> @insertelement_nxv8i1_2(i1 %x, i32 %idx) {
define <vscale x 16 x i1> @insertelement_nxv16i1_0() {
; RV32-LABEL: name: insertelement_nxv16i1_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -310,7 +310,7 @@ define <vscale x 16 x i1> @insertelement_nxv16i1_0() {
;
; RV64-LABEL: name: insertelement_nxv16i1_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -323,7 +323,7 @@ define <vscale x 16 x i1> @insertelement_nxv16i1_0() {
define <vscale x 16 x i1> @insertelement_nxv16i1_1() {
; RV32-LABEL: name: insertelement_nxv16i1_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
@@ -332,7 +332,7 @@ define <vscale x 16 x i1> @insertelement_nxv16i1_1() {
;
; RV64-LABEL: name: insertelement_nxv16i1_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s64)
@@ -350,7 +350,7 @@ define <vscale x 16 x i1> @insertelement_nxv16i1_2(i1 %x, i32 %idx) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_POISON
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
@@ -363,7 +363,7 @@ define <vscale x 16 x i1> @insertelement_nxv16i1_2(i1 %x, i32 %idx) {
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_POISON
; RV64-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s32)
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[ZEXT]](s64)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
@@ -403,7 +403,7 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_3(<vscale x 4 x i1> %v, i1 %x) {
define <vscale x 1 x i8> @insertelement_nxv1i8_0() {
; RV32-LABEL: name: insertelement_nxv1i8_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -412,7 +412,7 @@ define <vscale x 1 x i8> @insertelement_nxv1i8_0() {
;
; RV64-LABEL: name: insertelement_nxv1i8_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -425,7 +425,7 @@ define <vscale x 1 x i8> @insertelement_nxv1i8_0() {
define <vscale x 1 x i8> @insertelement_nxv1i8_1() {
; RV32-LABEL: name: insertelement_nxv1i8_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -434,7 +434,7 @@ define <vscale x 1 x i8> @insertelement_nxv1i8_1() {
;
; RV64-LABEL: name: insertelement_nxv1i8_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -451,7 +451,7 @@ define <vscale x 1 x i8> @insertelement_nxv1i8_2(i8 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
@@ -463,7 +463,7 @@ define <vscale x 1 x i8> @insertelement_nxv1i8_2(i8 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
@@ -475,7 +475,7 @@ define <vscale x 1 x i8> @insertelement_nxv1i8_2(i8 %x) {
define <vscale x 2 x i8> @insertelement_nxv2i8_0() {
; RV32-LABEL: name: insertelement_nxv2i8_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -484,7 +484,7 @@ define <vscale x 2 x i8> @insertelement_nxv2i8_0() {
;
; RV64-LABEL: name: insertelement_nxv2i8_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -497,7 +497,7 @@ define <vscale x 2 x i8> @insertelement_nxv2i8_0() {
define <vscale x 2 x i8> @insertelement_nxv2i8_1() {
; RV32-LABEL: name: insertelement_nxv2i8_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -506,7 +506,7 @@ define <vscale x 2 x i8> @insertelement_nxv2i8_1() {
;
; RV64-LABEL: name: insertelement_nxv2i8_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -523,7 +523,7 @@ define <vscale x 2 x i8> @insertelement_nxv2i8_2(i8 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
@@ -535,7 +535,7 @@ define <vscale x 2 x i8> @insertelement_nxv2i8_2(i8 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
@@ -547,7 +547,7 @@ define <vscale x 2 x i8> @insertelement_nxv2i8_2(i8 %x) {
define <vscale x 4 x i8> @insertelement_nxv4i8_0() {
; RV32-LABEL: name: insertelement_nxv4i8_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -556,7 +556,7 @@ define <vscale x 4 x i8> @insertelement_nxv4i8_0() {
;
; RV64-LABEL: name: insertelement_nxv4i8_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -569,7 +569,7 @@ define <vscale x 4 x i8> @insertelement_nxv4i8_0() {
define <vscale x 4 x i8> @insertelement_nxv4i8_1() {
; RV32-LABEL: name: insertelement_nxv4i8_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -578,7 +578,7 @@ define <vscale x 4 x i8> @insertelement_nxv4i8_1() {
;
; RV64-LABEL: name: insertelement_nxv4i8_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -595,7 +595,7 @@ define <vscale x 4 x i8> @insertelement_nxv4i8_2(i8 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
@@ -607,7 +607,7 @@ define <vscale x 4 x i8> @insertelement_nxv4i8_2(i8 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
@@ -619,7 +619,7 @@ define <vscale x 4 x i8> @insertelement_nxv4i8_2(i8 %x) {
define <vscale x 8 x i8> @insertelement_nxv8i8_0() {
; RV32-LABEL: name: insertelement_nxv8i8_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -628,7 +628,7 @@ define <vscale x 8 x i8> @insertelement_nxv8i8_0() {
;
; RV64-LABEL: name: insertelement_nxv8i8_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -641,7 +641,7 @@ define <vscale x 8 x i8> @insertelement_nxv8i8_0() {
define <vscale x 8 x i8> @insertelement_nxv8i8_1() {
; RV32-LABEL: name: insertelement_nxv8i8_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -650,7 +650,7 @@ define <vscale x 8 x i8> @insertelement_nxv8i8_1() {
;
; RV64-LABEL: name: insertelement_nxv8i8_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -667,7 +667,7 @@ define <vscale x 8 x i8> @insertelement_nxv8i8_2(i8 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
@@ -679,7 +679,7 @@ define <vscale x 8 x i8> @insertelement_nxv8i8_2(i8 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
@@ -691,7 +691,7 @@ define <vscale x 8 x i8> @insertelement_nxv8i8_2(i8 %x) {
define <vscale x 16 x i8> @insertelement_nxv16i8_0() {
; RV32-LABEL: name: insertelement_nxv16i8_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -700,7 +700,7 @@ define <vscale x 16 x i8> @insertelement_nxv16i8_0() {
;
; RV64-LABEL: name: insertelement_nxv16i8_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -713,7 +713,7 @@ define <vscale x 16 x i8> @insertelement_nxv16i8_0() {
define <vscale x 16 x i8> @insertelement_nxv16i8_1() {
; RV32-LABEL: name: insertelement_nxv16i8_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
@@ -722,7 +722,7 @@ define <vscale x 16 x i8> @insertelement_nxv16i8_1() {
;
; RV64-LABEL: name: insertelement_nxv16i8_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
@@ -742,7 +742,7 @@ define <vscale x 16 x i8> @insertelement_nxv16i8_2(i8 %x, i64 %idx) {
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_POISON
; RV32-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s64)
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[TRUNC1]](s32)
; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
@@ -755,7 +755,7 @@ define <vscale x 16 x i8> @insertelement_nxv16i8_2(i8 %x, i64 %idx) {
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_POISON
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[COPY1]](s64)
; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m2
@@ -794,7 +794,7 @@ define <vscale x 4 x i8> @insertelement_nxv4i8_3(<vscale x 4 x i8> %v, i8 %x) {
define <vscale x 1 x i16> @insertelement_nxv1i16_0() {
; RV32-LABEL: name: insertelement_nxv1i16_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -803,7 +803,7 @@ define <vscale x 1 x i16> @insertelement_nxv1i16_0() {
;
; RV64-LABEL: name: insertelement_nxv1i16_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -816,7 +816,7 @@ define <vscale x 1 x i16> @insertelement_nxv1i16_0() {
define <vscale x 1 x i16> @insertelement_nxv1i16_1() {
; RV32-LABEL: name: insertelement_nxv1i16_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -825,7 +825,7 @@ define <vscale x 1 x i16> @insertelement_nxv1i16_1() {
;
; RV64-LABEL: name: insertelement_nxv1i16_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -842,7 +842,7 @@ define <vscale x 1 x i16> @insertelement_nxv1i16_2(i16 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
@@ -854,7 +854,7 @@ define <vscale x 1 x i16> @insertelement_nxv1i16_2(i16 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
@@ -866,7 +866,7 @@ define <vscale x 1 x i16> @insertelement_nxv1i16_2(i16 %x) {
define <vscale x 2 x i16> @insertelement_nxv2i16_0() {
; RV32-LABEL: name: insertelement_nxv2i16_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -875,7 +875,7 @@ define <vscale x 2 x i16> @insertelement_nxv2i16_0() {
;
; RV64-LABEL: name: insertelement_nxv2i16_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -888,7 +888,7 @@ define <vscale x 2 x i16> @insertelement_nxv2i16_0() {
define <vscale x 2 x i16> @insertelement_nxv2i16_1() {
; RV32-LABEL: name: insertelement_nxv2i16_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -897,7 +897,7 @@ define <vscale x 2 x i16> @insertelement_nxv2i16_1() {
;
; RV64-LABEL: name: insertelement_nxv2i16_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -914,7 +914,7 @@ define <vscale x 2 x i16> @insertelement_nxv2i16_2(i16 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
@@ -926,7 +926,7 @@ define <vscale x 2 x i16> @insertelement_nxv2i16_2(i16 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
@@ -938,7 +938,7 @@ define <vscale x 2 x i16> @insertelement_nxv2i16_2(i16 %x) {
define <vscale x 4 x i16> @insertelement_nxv4i16_0() {
; RV32-LABEL: name: insertelement_nxv4i16_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -947,7 +947,7 @@ define <vscale x 4 x i16> @insertelement_nxv4i16_0() {
;
; RV64-LABEL: name: insertelement_nxv4i16_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -960,7 +960,7 @@ define <vscale x 4 x i16> @insertelement_nxv4i16_0() {
define <vscale x 4 x i16> @insertelement_nxv4i16_1() {
; RV32-LABEL: name: insertelement_nxv4i16_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -969,7 +969,7 @@ define <vscale x 4 x i16> @insertelement_nxv4i16_1() {
;
; RV64-LABEL: name: insertelement_nxv4i16_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -986,7 +986,7 @@ define <vscale x 4 x i16> @insertelement_nxv4i16_2(i16 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
@@ -998,7 +998,7 @@ define <vscale x 4 x i16> @insertelement_nxv4i16_2(i16 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
@@ -1010,7 +1010,7 @@ define <vscale x 4 x i16> @insertelement_nxv4i16_2(i16 %x) {
define <vscale x 8 x i16> @insertelement_nxv8i16_0() {
; RV32-LABEL: name: insertelement_nxv8i16_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -1019,7 +1019,7 @@ define <vscale x 8 x i16> @insertelement_nxv8i16_0() {
;
; RV64-LABEL: name: insertelement_nxv8i16_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -1032,7 +1032,7 @@ define <vscale x 8 x i16> @insertelement_nxv8i16_0() {
define <vscale x 8 x i16> @insertelement_nxv8i16_1() {
; RV32-LABEL: name: insertelement_nxv8i16_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -1041,7 +1041,7 @@ define <vscale x 8 x i16> @insertelement_nxv8i16_1() {
;
; RV64-LABEL: name: insertelement_nxv8i16_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -1058,7 +1058,7 @@ define <vscale x 8 x i16> @insertelement_nxv8i16_2(i16 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
@@ -1070,7 +1070,7 @@ define <vscale x 8 x i16> @insertelement_nxv8i16_2(i16 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
@@ -1082,7 +1082,7 @@ define <vscale x 8 x i16> @insertelement_nxv8i16_2(i16 %x) {
define <vscale x 16 x i16> @insertelement_nxv16i16_0() {
; RV32-LABEL: name: insertelement_nxv16i16_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -1091,7 +1091,7 @@ define <vscale x 16 x i16> @insertelement_nxv16i16_0() {
;
; RV64-LABEL: name: insertelement_nxv16i16_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -1104,7 +1104,7 @@ define <vscale x 16 x i16> @insertelement_nxv16i16_0() {
define <vscale x 16 x i16> @insertelement_nxv16i16_1() {
; RV32-LABEL: name: insertelement_nxv16i16_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
@@ -1113,7 +1113,7 @@ define <vscale x 16 x i16> @insertelement_nxv16i16_1() {
;
; RV64-LABEL: name: insertelement_nxv16i16_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
@@ -1130,7 +1130,7 @@ define <vscale x 16 x i16> @insertelement_nxv16i16_2(i16 %x) {
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
@@ -1142,7 +1142,7 @@ define <vscale x 16 x i16> @insertelement_nxv16i16_2(i16 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s64)
; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
@@ -1182,7 +1182,7 @@ define <vscale x 4 x i16> @insertelement_nxv4i16(<vscale x 4 x i16> %v, i16 %x)
define <vscale x 1 x i32> @insertelement_nxv1i32_0() {
; RV32-LABEL: name: insertelement_nxv1i32_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
@@ -1190,7 +1190,7 @@ define <vscale x 1 x i32> @insertelement_nxv1i32_0() {
;
; RV64-LABEL: name: insertelement_nxv1i32_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1203,7 +1203,7 @@ define <vscale x 1 x i32> @insertelement_nxv1i32_0() {
define <vscale x 1 x i32> @insertelement_nxv1i32_1() {
; RV32-LABEL: name: insertelement_nxv1i32_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
@@ -1212,7 +1212,7 @@ define <vscale x 1 x i32> @insertelement_nxv1i32_1() {
;
; RV64-LABEL: name: insertelement_nxv1i32_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1228,7 +1228,7 @@ define <vscale x 1 x i32> @insertelement_nxv1i32_2(i32 %x) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
@@ -1240,7 +1240,7 @@ define <vscale x 1 x i32> @insertelement_nxv1i32_2(i32 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
@@ -1252,7 +1252,7 @@ define <vscale x 1 x i32> @insertelement_nxv1i32_2(i32 %x) {
define <vscale x 2 x i32> @insertelement_nxv2i32_0() {
; RV32-LABEL: name: insertelement_nxv2i32_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
@@ -1260,7 +1260,7 @@ define <vscale x 2 x i32> @insertelement_nxv2i32_0() {
;
; RV64-LABEL: name: insertelement_nxv2i32_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1273,7 +1273,7 @@ define <vscale x 2 x i32> @insertelement_nxv2i32_0() {
define <vscale x 2 x i32> @insertelement_nxv2i32_1() {
; RV32-LABEL: name: insertelement_nxv2i32_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
@@ -1282,7 +1282,7 @@ define <vscale x 2 x i32> @insertelement_nxv2i32_1() {
;
; RV64-LABEL: name: insertelement_nxv2i32_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1298,7 +1298,7 @@ define <vscale x 2 x i32> @insertelement_nxv2i32_2(i32 %x) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
@@ -1310,7 +1310,7 @@ define <vscale x 2 x i32> @insertelement_nxv2i32_2(i32 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
@@ -1322,7 +1322,7 @@ define <vscale x 2 x i32> @insertelement_nxv2i32_2(i32 %x) {
define <vscale x 4 x i32> @insertelement_nxv4i32_0() {
; RV32-LABEL: name: insertelement_nxv4i32_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
@@ -1330,7 +1330,7 @@ define <vscale x 4 x i32> @insertelement_nxv4i32_0() {
;
; RV64-LABEL: name: insertelement_nxv4i32_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1343,7 +1343,7 @@ define <vscale x 4 x i32> @insertelement_nxv4i32_0() {
define <vscale x 4 x i32> @insertelement_nxv4i32_1() {
; RV32-LABEL: name: insertelement_nxv4i32_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
@@ -1352,7 +1352,7 @@ define <vscale x 4 x i32> @insertelement_nxv4i32_1() {
;
; RV64-LABEL: name: insertelement_nxv4i32_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1368,7 +1368,7 @@ define <vscale x 4 x i32> @insertelement_nxv4i32_2(i32 %x) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
@@ -1380,7 +1380,7 @@ define <vscale x 4 x i32> @insertelement_nxv4i32_2(i32 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
@@ -1392,7 +1392,7 @@ define <vscale x 4 x i32> @insertelement_nxv4i32_2(i32 %x) {
define <vscale x 8 x i32> @insertelement_nxv8i32_0() {
; RV32-LABEL: name: insertelement_nxv8i32_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
@@ -1400,7 +1400,7 @@ define <vscale x 8 x i32> @insertelement_nxv8i32_0() {
;
; RV64-LABEL: name: insertelement_nxv8i32_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1413,7 +1413,7 @@ define <vscale x 8 x i32> @insertelement_nxv8i32_0() {
define <vscale x 8 x i32> @insertelement_nxv8i32_1() {
; RV32-LABEL: name: insertelement_nxv8i32_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
@@ -1422,7 +1422,7 @@ define <vscale x 8 x i32> @insertelement_nxv8i32_1() {
;
; RV64-LABEL: name: insertelement_nxv8i32_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1438,7 +1438,7 @@ define <vscale x 8 x i32> @insertelement_nxv8i32_2(i32 %x) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
@@ -1450,7 +1450,7 @@ define <vscale x 8 x i32> @insertelement_nxv8i32_2(i32 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
@@ -1462,7 +1462,7 @@ define <vscale x 8 x i32> @insertelement_nxv8i32_2(i32 %x) {
define <vscale x 16 x i32> @insertelement_nxv16i32_0() {
; RV32-LABEL: name: insertelement_nxv16i32_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
@@ -1470,7 +1470,7 @@ define <vscale x 16 x i32> @insertelement_nxv16i32_0() {
;
; RV64-LABEL: name: insertelement_nxv16i32_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1483,7 +1483,7 @@ define <vscale x 16 x i32> @insertelement_nxv16i32_0() {
define <vscale x 16 x i32> @insertelement_nxv16i32_1() {
; RV32-LABEL: name: insertelement_nxv16i32_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
@@ -1492,7 +1492,7 @@ define <vscale x 16 x i32> @insertelement_nxv16i32_1() {
;
; RV64-LABEL: name: insertelement_nxv16i32_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s64)
@@ -1508,7 +1508,7 @@ define <vscale x 16 x i32> @insertelement_nxv16i32_2(i32 %x) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
@@ -1520,7 +1520,7 @@ define <vscale x 16 x i32> @insertelement_nxv16i32_2(i32 %x) {
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s64)
; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
@@ -1559,7 +1559,7 @@ define <vscale x 4 x i32> @insertelement_nxv4i32(<vscale x 4 x i32> %v, i32 %x)
define <vscale x 1 x i64> @insertelement_nxv1i64_0() {
; RV32-LABEL: name: insertelement_nxv1i64_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1568,7 +1568,7 @@ define <vscale x 1 x i64> @insertelement_nxv1i64_0() {
;
; RV64-LABEL: name: insertelement_nxv1i64_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
@@ -1580,7 +1580,7 @@ define <vscale x 1 x i64> @insertelement_nxv1i64_0() {
define <vscale x 1 x i64> @insertelement_nxv1i64_1() {
; RV32-LABEL: name: insertelement_nxv1i64_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1589,7 +1589,7 @@ define <vscale x 1 x i64> @insertelement_nxv1i64_1() {
;
; RV64-LABEL: name: insertelement_nxv1i64_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
@@ -1607,7 +1607,7 @@ define <vscale x 1 x i64> @insertelement_nxv1i64_2(i64 %x) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
@@ -1618,7 +1618,7 @@ define <vscale x 1 x i64> @insertelement_nxv1i64_2(i64 %x) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
@@ -1630,7 +1630,7 @@ define <vscale x 1 x i64> @insertelement_nxv1i64_2(i64 %x) {
define <vscale x 2 x i64> @insertelement_nxv2i64_0() {
; RV32-LABEL: name: insertelement_nxv2i64_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1639,7 +1639,7 @@ define <vscale x 2 x i64> @insertelement_nxv2i64_0() {
;
; RV64-LABEL: name: insertelement_nxv2i64_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
@@ -1651,7 +1651,7 @@ define <vscale x 2 x i64> @insertelement_nxv2i64_0() {
define <vscale x 2 x i64> @insertelement_nxv2i64_1() {
; RV32-LABEL: name: insertelement_nxv2i64_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1660,7 +1660,7 @@ define <vscale x 2 x i64> @insertelement_nxv2i64_1() {
;
; RV64-LABEL: name: insertelement_nxv2i64_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
@@ -1678,7 +1678,7 @@ define <vscale x 2 x i64> @insertelement_nxv2i64_2(i64 %x) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
@@ -1689,7 +1689,7 @@ define <vscale x 2 x i64> @insertelement_nxv2i64_2(i64 %x) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
@@ -1701,7 +1701,7 @@ define <vscale x 2 x i64> @insertelement_nxv2i64_2(i64 %x) {
define <vscale x 4 x i64> @insertelement_nxv4i64_0() {
; RV32-LABEL: name: insertelement_nxv4i64_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1710,7 +1710,7 @@ define <vscale x 4 x i64> @insertelement_nxv4i64_0() {
;
; RV64-LABEL: name: insertelement_nxv4i64_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
@@ -1722,7 +1722,7 @@ define <vscale x 4 x i64> @insertelement_nxv4i64_0() {
define <vscale x 4 x i64> @insertelement_nxv4i64_1() {
; RV32-LABEL: name: insertelement_nxv4i64_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1731,7 +1731,7 @@ define <vscale x 4 x i64> @insertelement_nxv4i64_1() {
;
; RV64-LABEL: name: insertelement_nxv4i64_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
@@ -1749,7 +1749,7 @@ define <vscale x 4 x i64> @insertelement_nxv4i64_2(i64 %x) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
@@ -1760,7 +1760,7 @@ define <vscale x 4 x i64> @insertelement_nxv4i64_2(i64 %x) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
@@ -1772,7 +1772,7 @@ define <vscale x 4 x i64> @insertelement_nxv4i64_2(i64 %x) {
define <vscale x 8 x i64> @insertelement_nxv8i64_0() {
; RV32-LABEL: name: insertelement_nxv8i64_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1781,7 +1781,7 @@ define <vscale x 8 x i64> @insertelement_nxv8i64_0() {
;
; RV64-LABEL: name: insertelement_nxv8i64_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
@@ -1793,7 +1793,7 @@ define <vscale x 8 x i64> @insertelement_nxv8i64_0() {
define <vscale x 8 x i64> @insertelement_nxv8i64_1() {
; RV32-LABEL: name: insertelement_nxv8i64_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1802,7 +1802,7 @@ define <vscale x 8 x i64> @insertelement_nxv8i64_1() {
;
; RV64-LABEL: name: insertelement_nxv8i64_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
@@ -1820,7 +1820,7 @@ define <vscale x 8 x i64> @insertelement_nxv8i64_2(i64 %x) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
@@ -1831,7 +1831,7 @@ define <vscale x 8 x i64> @insertelement_nxv8i64_2(i64 %x) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
@@ -1843,7 +1843,7 @@ define <vscale x 8 x i64> @insertelement_nxv8i64_2(i64 %x) {
define <vscale x 16 x i64> @insertelement_nxv16i64_0() {
; RV32-LABEL: name: insertelement_nxv16i64_0
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1854,7 +1854,7 @@ define <vscale x 16 x i64> @insertelement_nxv16i64_0() {
;
; RV64-LABEL: name: insertelement_nxv16i64_0
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C]](s64)
; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
@@ -1868,7 +1868,7 @@ define <vscale x 16 x i64> @insertelement_nxv16i64_0() {
define <vscale x 16 x i64> @insertelement_nxv16i64_1() {
; RV32-LABEL: name: insertelement_nxv16i64_1
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
@@ -1879,7 +1879,7 @@ define <vscale x 16 x i64> @insertelement_nxv16i64_1() {
;
; RV64-LABEL: name: insertelement_nxv16i64_1
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s64)
@@ -1899,7 +1899,7 @@ define <vscale x 16 x i64> @insertelement_nxv16i64_2(i64 %x) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_POISON
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
@@ -1912,7 +1912,7 @@ define <vscale x 16 x i64> @insertelement_nxv16i64_2(i64 %x) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_POISON
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s64)
; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
index dbc13840a0265..0ba2a61692302 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
@@ -84,6 +84,10 @@
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
#
+# DEBUG-NEXT: G_POISON (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
+#
# DEBUG-NEXT: G_PHI (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
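
[Editorial note] In the validation listing above, the two SKIPPED lines mean the target's rules for G_POISON use a user-defined legality predicate, so the automatic type/imm index coverage checks are bypassed. As a rough illustration of how such a rule arises (the predicate below is an assumption for illustration only; RISC-V's actual G_POISON rule is not shown in this excerpt), inside a target's LegalizerInfo constructor one might write:

```cpp
// Illustration only: a rule whose legality is decided by a custom callback.
// Rules written this way make the validation pass print
// ".. coverage check SKIPPED: user-defined predicate detected".
getActionDefinitionsBuilder(TargetOpcode::G_POISON)
    .legalIf([](const LegalityQuery &Query) {
      // Example predicate: accept any scalar or vector type.
      return Query.Types[0].isScalar() || Query.Types[0].isVector();
    });
```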
>From a8cc5076ee1efbb455e311300106ebdb8d0a0937 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <mat646 at gmail.com>
Date: Sun, 20 Apr 2025 19:55:18 +0000
Subject: [PATCH 4/5] Remove combine logic
---
.../include/llvm/Target/GlobalISel/Combine.td | 49 +------------------
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 13 +----
2 files changed, 3 insertions(+), 59 deletions(-)
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 258e846e52e5f..e5e19a1d93486 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -430,12 +430,6 @@ def binop_right_undef_to_undef: GICombineRule<
[{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
(apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
-def binop_right_poison_to_poison
- : GICombineRule<(defs root:$root),
- (match(wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
- [{ return Helper.matchOperandIsPoison(*${root}, 2); }]),
- (apply [{ Helper.replaceInstWithPoison(*${root}); }])>;
-
def unary_undef_to_zero: GICombineRule<
(defs root:$root),
(match (wip_match_opcode G_ABS):$root,
@@ -453,17 +447,6 @@ def unary_undef_to_undef : GICombineRule<
(match (unary_undef_to_undef_frags $dst)),
(apply [{ Helper.replaceInstWithUndef(*${dst}.getParent()); }])>;
-def unary_poison_to_poison_frags
- : GICombinePatFrag<(outs root:$dst), (ins),
- !foreach(op,
- [G_TRUNC, G_BITCAST, G_ANYEXT, G_PTRTOINT,
- G_INTTOPTR, G_FPTOSI, G_FPTOUI],
- (pattern(op $dst, $x), (G_POISON $x)))>;
-def unary_poison_to_poison
- : GICombineRule<
- (defs root:$dst), (match(unary_poison_to_poison_frags $dst)),
- (apply [{ Helper.replaceInstWithPoison(*${dst}.getParent()); }])>;
-
// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
@@ -472,15 +455,6 @@ def propagate_undef_any_op: GICombineRule<
[{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
(apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
-// Instructions where if any source operand is poison, the instruction can be
-// replaced with poison.
-def propagate_poison_any_op
- : GICombineRule<
- (defs root:$root),
- (match(wip_match_opcode G_ADD, G_SUB, G_XOR):$root,
- [{ return Helper.matchAnyExplicitUseIsPoison(*${root}); }]),
- (apply [{ Helper.replaceInstWithPoison(*${root}); }])>;
-
// Instructions where if all source operands are undef, the instruction can be
// replaced with undef.
def propagate_undef_all_ops: GICombineRule<
@@ -489,15 +463,6 @@ def propagate_undef_all_ops: GICombineRule<
[{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
(apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
-// Instructions where if all source operands are poison, the instruction can be
-// replaced with poison.
-def propagate_poison_all_ops
- : GICombineRule<
- (defs root:$root),
- (match(wip_match_opcode G_SHUFFLE_VECTOR, G_BUILD_VECTOR):$root,
- [{ return Helper.matchAllExplicitUsesArePoison(*${root}); }]),
- (apply [{ Helper.replaceInstWithPoison(*${root}); }])>;
-
// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
(defs root:$root),
@@ -689,13 +654,6 @@ def erase_undef_store : GICombineRule<
(apply [{ Helper.eraseInst(*${root}); }])
>;
-// Erase stores of poison values.
-def erase_poison_store
- : GICombineRule<(defs root:$root),
- (match(wip_match_opcode G_STORE):$root,
- [{ return Helper.matchPoisonStore(*${root}); }]),
- (apply [{ Helper.eraseInst(*${root}); }])>;
-
def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
(defs root:$root, simplify_add_to_sub_matchinfo:$info),
@@ -2023,11 +1981,6 @@ def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
erase_undef_store,
insert_extract_vec_elt_out_of_bounds]>;
-def poison_combines
- : GICombineGroup<[binop_right_poison_to_poison, unary_poison_to_poison,
- propagate_poison_any_op, propagate_poison_all_ops,
- erase_poison_store]>;
-
def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
binop_same_val, binop_left_to_zero,
binop_right_to_zero, p2i_to_i2p,
@@ -2083,7 +2036,7 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
vector_ops_combines, freeze_combines, cast_combines,
insert_vec_elt_combines, extract_vec_elt_combines, combines_for_extload,
combine_extracted_vector_load,
- undef_combines, poison_combines, identity_combines, phi_combines,
+ undef_combines, identity_combines, phi_combines,
simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big,
reassocs, ptr_add_immed_chain, cmp_combines,
shl_ashr_to_sext_inreg, neg_and_one_to_sext_inreg, sext_inreg_of_load,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 610d3dd5f95c1..4f0b0683f4b3d 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -330,7 +330,6 @@ bool CombinerHelper::matchCombineConcatVectors(
for (const MachineOperand &BuildVecMO : Def->uses())
Ops.push_back(BuildVecMO.getReg());
break;
- case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF: {
LLT OpType = MRI.getType(Reg);
// Keep one undef value for all the undef operands.
@@ -3128,7 +3127,6 @@ bool CombinerHelper::matchCombineInsertVecElts(
// If we didn't end in a G_IMPLICIT_DEF and the source is not fully
// overwritten, bail out.
return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
- TmpInst->getOpcode() == TargetOpcode::G_POISON ||
all_of(MatchInfo, [](Register Reg) { return !!Reg; });
}
@@ -3499,13 +3497,12 @@ bool CombinerHelper::matchUseVectorTruncate(MachineInstr &MI,
if (I < 2)
return false;
- // Check the remaining source elements are only G_IMPLICIT_DEF or G_POISON
+ // Check the remaining source elements are only G_IMPLICIT_DEF
for (; I < NumOperands; ++I) {
auto SrcMI = MRI.getVRegDef(BuildMI->getSourceReg(I));
auto SrcMIOpc = SrcMI->getOpcode();
- if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF &&
- SrcMIOpc != TargetOpcode::G_POISON)
+ if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF)
return false;
}
@@ -7966,12 +7963,6 @@ bool CombinerHelper::matchShuffleDisjointMask(MachineInstr &MI,
if (getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, Shuffle.getSrc2Reg(), MRI))
return false;
- if (getOpcodeDef(TargetOpcode::G_POISON, Shuffle.getSrc1Reg(), MRI))
- return false;
-
- if (getOpcodeDef(TargetOpcode::G_POISON, Shuffle.getSrc2Reg(), MRI))
- return false;
-
const LLT DstTy = MRI.getType(Shuffle.getReg(0));
const LLT Src1Ty = MRI.getType(Shuffle.getSrc1Reg());
if (!isLegalOrBeforeLegalizer(
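
[Editorial note] The combine rules removed above dispatched to Helper.replaceInstWithPoison, whose body is not part of this excerpt. A minimal sketch of that replace-with-poison pattern, assuming it mirrors the existing replace-with-implicit-def flow and using only generic MachineIRBuilder/MachineRegisterInfo calls, could look like:

```cpp
// Minimal sketch, not the patch's actual implementation: build a G_POISON of
// the destination type, point all users of the old result at it, and erase
// the original instruction.
static void replaceWithPoisonSketch(MachineInstr &MI, MachineIRBuilder &Builder,
                                    MachineRegisterInfo &MRI) {
  Register OldDst = MI.getOperand(0).getReg();
  Builder.setInstrAndDebugLoc(MI);
  // Creates a fresh vreg of the same LLT holding the poison value.
  auto Poison =
      Builder.buildInstr(TargetOpcode::G_POISON, {MRI.getType(OldDst)}, {});
  MRI.replaceRegWith(OldDst, Poison.getReg(0));
  MI.eraseFromParent();
}
```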
>From 18ab129e157494a7cb51f52a92939d89abcdc0f5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <mat646 at gmail.com>
Date: Sun, 20 Apr 2025 20:23:12 +0000
Subject: [PATCH 5/5] Restore G_POISON handling in CombinerHelper and add AArch64 fullrevpoison combine
---
llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 7 +++++--
llvm/lib/Target/AArch64/AArch64Combine.td | 12 +++++++++++-
.../AArch64/GlobalISel/legalizer-info-validation.mir | 1 +
3 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 4f0b0683f4b3d..8c0676c1ea11f 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -330,6 +330,7 @@ bool CombinerHelper::matchCombineConcatVectors(
for (const MachineOperand &BuildVecMO : Def->uses())
Ops.push_back(BuildVecMO.getReg());
break;
+ case TargetOpcode::G_POISON:
case TargetOpcode::G_IMPLICIT_DEF: {
LLT OpType = MRI.getType(Reg);
// Keep one undef value for all the undef operands.
@@ -3127,6 +3128,7 @@ bool CombinerHelper::matchCombineInsertVecElts(
// If we didn't end in a G_IMPLICIT_DEF and the source is not fully
// overwritten, bail out.
return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ TmpInst->getOpcode() == TargetOpcode::G_POISON ||
all_of(MatchInfo, [](Register Reg) { return !!Reg; });
}
@@ -3497,12 +3499,13 @@ bool CombinerHelper::matchUseVectorTruncate(MachineInstr &MI,
if (I < 2)
return false;
- // Check the remaining source elements are only G_IMPLICIT_DEF
+ // Check the remaining source elements are only G_IMPLICIT_DEF or G_POISON
for (; I < NumOperands; ++I) {
auto SrcMI = MRI.getVRegDef(BuildMI->getSourceReg(I));
auto SrcMIOpc = SrcMI->getOpcode();
- if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF)
+ if (SrcMIOpc != TargetOpcode::G_IMPLICIT_DEF &&
+ SrcMIOpc != TargetOpcode::G_POISON)
return false;
}
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index f84e83816bf33..841f966a5afaa 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -140,6 +140,15 @@ def fullrev: GICombineRule <
(apply [{ applyFullRev(*${root}, MRI); }])
>;
+def fullrevpoison: GICombineRule <
+ (defs root:$root, shuffle_matchdata:$matchinfo),
+ (match (G_POISON $src2),
+ (G_SHUFFLE_VECTOR $src, $src1, $src2, $mask):$root,
+ [{ return ShuffleVectorInst::isReverseMask(${mask}.getShuffleMask(),
+ ${mask}.getShuffleMask().size()); }]),
+ (apply [{ applyFullRev(*${root}, MRI); }])
+>;
+
def insertelt_nonconst: GICombineRule <
(defs root:$root, shuffle_matchdata:$matchinfo),
(match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
@@ -173,7 +182,8 @@ def form_duplane : GICombineRule <
>;
def shuffle_vector_lowering : GICombineGroup<[dup, rev, ext, zip, uzp, trn, fullrev,
- form_duplane, shuf_to_ins]>;
+ fullrevpoison, form_duplane,
+ shuf_to_ins]>;
// Turn G_UNMERGE_VALUES -> G_EXTRACT_VECTOR_ELT's
def vector_unmerge_lowering : GICombineRule <
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index 273a4ba8d8bf6..a28678cdad13c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -83,6 +83,7 @@
# DEBUG-NEXT: .. the first uncovered imm index: {{[0-9]+}}, OK
#
# DEBUG-NEXT: G_POISON (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: {{[0-9]+}}, OK
# DEBUG-NEXT: .. the first uncovered imm index: {{[0-9]+}}, OK
#
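
[Editorial note] The added ".. opcode ... is aliased to ..." check indicates that AArch64 now defines G_POISON's legality by aliasing it to another opcode's action definitions rather than giving it its own rule set. The usual way to obtain that aliasing (an assumption about this patch; the AArch64 legalizer change itself is not shown in this excerpt) is to list both opcodes in a single getActionDefinitionsBuilder call:

```cpp
// Illustration only: listing several opcodes in one builder call aliases the
// later ones to the first, which the validation pass reports as
// ".. opcode N is aliased to M".
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
getActionDefinitionsBuilder(
    {TargetOpcode::G_IMPLICIT_DEF, TargetOpcode::G_POISON})
    .legalFor({s32, s64})
    .clampScalar(0, s32, s64);
```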