[llvm] Introducing a new ISD::POISON SDNode to represent the poison value in the IR. (PR #125883)
zhijian lin via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 7 12:30:44 PST 2025
https://github.com/diggerlin updated https://github.com/llvm/llvm-project/pull/125883
From 15c2c17e225abe5bff75a580693b28836305cac8 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Wed, 5 Feb 2025 17:04:32 +0000
Subject: [PATCH] Introduce a new ISD node: POISON
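
Introduce an ISD::POISON SDNode to represent the IR poison value in
SelectionDAG, alongside the existing ISD::UNDEF:

- add the ISD::POISON opcode and a SelectionDAG::getPoison(EVT) creator;
- add SDNode/SDValue isPoison() and isUndefOrPoison() predicates;
- switch existing isUndef() checks over to isUndefOrPoison() in DAGCombiner,
  the legalizers, and the AArch64, ARM, PowerPC, RISC-V, and X86 backends;
- legalize ISD::POISON by replacing it with UNDEF for now.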
---
llvm/include/llvm/CodeGen/ISDOpcodes.h | 3 +
llvm/include/llvm/CodeGen/SelectionDAG.h | 7 +-
llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 11 +
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 319 +++++++-------
llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 32 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 2 +
.../SelectionDAG/LegalizeVectorTypes.cpp | 48 +-
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 218 +++++-----
.../SelectionDAG/SelectionDAGBuilder.cpp | 2 +-
.../SelectionDAG/SelectionDAGDumper.cpp | 1 +
.../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 1 +
.../SelectionDAG/StatepointLowering.cpp | 8 +-
.../CodeGen/SelectionDAG/TargetLowering.cpp | 30 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 101 +++--
llvm/lib/Target/ARM/ARMISelLowering.cpp | 68 +--
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 63 +--
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 106 ++---
llvm/lib/Target/X86/X86ISelLowering.cpp | 411 ++++++++++--------
18 files changed, 781 insertions(+), 650 deletions(-)
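
Note for reviewers: a minimal sketch of the new API surface (illustrative
only, assuming a SelectionDAG DAG is in scope):

    SDValue P = DAG.getPoison(MVT::i32);  // creates an ISD::POISON node
    assert(P.getOpcode() == ISD::POISON);
    assert(P.isUndefOrPoison());          // true for UNDEF and POISON alike
    assert(!DAG.getUNDEF(MVT::i32)->isPoison());

During legalization, ISD::POISON is currently relaxed to UNDEF (see the
LegalizeDAG.cpp hunk below).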
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 046d9befd0e9158..f249fdb58c9a626 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -217,6 +217,9 @@ enum NodeType {
/// UNDEF - An undefined node.
UNDEF,
+ /// POISON - A node representing the IR poison value.
+ POISON,
+
/// FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
/// is evaluated to UNDEF), or returns VAL otherwise. Note that each
/// read of UNDEF can yield different value, but FREEZE(UNDEF) cannot.
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 461c0c1ead16d2c..7c6423727510bce 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -873,7 +873,7 @@ class SelectionDAG {
/// for integers, a type wider than) VT's element type.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
// VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
- if (Op.getOpcode() == ISD::UNDEF) {
+ if (Op.isUndefOrPoison()) {
assert((VT.getVectorElementType() == Op.getValueType() ||
(VT.isInteger() &&
VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
@@ -889,7 +889,7 @@ class SelectionDAG {
// Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
// elements.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
- if (Op.getOpcode() == ISD::UNDEF) {
+ if (Op.isUndefOrPoison()) {
assert((VT.getVectorElementType() == Op.getValueType() ||
(VT.isInteger() &&
VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
@@ -1130,6 +1130,9 @@ class SelectionDAG {
return getNode(ISD::UNDEF, SDLoc(), VT);
}
+ /// Return a POISON node. POISON does not have a useful SDLoc.
+ SDValue getPoison(EVT VT) { return getNode(ISD::POISON, SDLoc(), VT); }
+
/// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm,
bool ConstantFold = true);
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 8c1e2fa6f57a878..b7898e03c0f80c8 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -213,6 +213,7 @@ class SDValue {
inline bool isTargetOpcode() const;
inline bool isMachineOpcode() const;
inline bool isUndef() const;
+ inline bool isUndefOrPoison() const;
inline unsigned getMachineOpcode() const;
inline const DebugLoc &getDebugLoc() const;
inline void dump() const;
@@ -693,6 +694,14 @@ END_TWO_BYTE_PACK()
/// Return true if the type of the node type undefined.
bool isUndef() const { return NodeType == ISD::UNDEF; }
+ /// Return true if this node's opcode is ISD::POISON.
+ bool isPoison() const { return NodeType == ISD::POISON; }
+
+ /// Return true if this node's opcode is ISD::UNDEF or ISD::POISON.
+ bool isUndefOrPoison() const {
+ return NodeType == ISD::UNDEF || NodeType == ISD::POISON;
+ }
+
/// Test if this node is a memory intrinsic (with valid pointer information).
bool isMemIntrinsic() const { return SDNodeBits.IsMemIntrinsic; }
@@ -1250,6 +1259,8 @@ inline bool SDValue::isUndef() const {
return Node->isUndef();
}
+inline bool SDValue::isUndefOrPoison() const { return Node->isUndefOrPoison(); }
+
inline bool SDValue::use_empty() const {
return !Node->hasAnyUseOfValue(ResNo);
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index ef58da873c59c58..67ea17da8776600 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1043,7 +1043,7 @@ static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
return false;
unsigned BitWidth = N.getScalarValueSizeInBits();
for (const SDValue &Op : N->op_values()) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op);
if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth ||
@@ -2098,7 +2098,7 @@ SDValue DAGCombiner::visitFCANONICALIZE(SDNode *N) {
SDLoc dl(N);
// Canonicalize undef to quiet NaN.
- if (Operand.isUndef()) {
+ if (Operand.isUndefOrPoison()) {
APFloat CanonicalQNaN = APFloat::getQNaN(VT.getFltSemantics());
return DAG.getConstantFP(CanonicalQNaN, dl, VT);
}
@@ -2660,9 +2660,9 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
SDLoc DL(N);
// fold (add x, undef) -> undef
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return N0;
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return N1;
// fold (add c1, c2) -> c1+c2
@@ -3039,7 +3039,7 @@ SDValue DAGCombiner::visitADDSAT(SDNode *N) {
SDLoc DL(N);
// fold (add_sat x, undef) -> -1
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getAllOnesConstant(DL, VT);
// fold (add_sat c1, c2) -> c3
@@ -4043,9 +4043,9 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
DAG.getNode(ISD::MUL, DL, VT, B, C));
// If either operand of a sub is undef, the result is undef
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return N0;
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return N1;
if (SDValue V = foldAddSubBoolOfMaskedVal(N, DL, DAG))
@@ -4240,7 +4240,7 @@ SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
SDLoc DL(N);
// fold (sub_sat x, undef) -> 0
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// fold (sub_sat x, x) -> 0
@@ -4393,7 +4393,7 @@ SDValue DAGCombiner::visitMULFIX(SDNode *N) {
EVT VT = N0.getValueType();
// fold (mulfix x, undef, scale) -> 0
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getConstant(0, SDLoc(N), VT);
// Canonicalize constant to RHS (vector doesn't have to splat)
@@ -4418,7 +4418,7 @@ template <class MatchContextClass> SDValue DAGCombiner::visitMUL(SDNode *N) {
MatchContextClass Matcher(DAG, TLI, N);
// fold (mul x, undef) -> 0
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// fold (mul c1, c2) -> c1*c2
@@ -4778,7 +4778,7 @@ static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) {
// undef / X -> 0
// undef % X -> 0
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// 0 / X -> 0
@@ -5197,7 +5197,7 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
DAG.getShiftAmountConstant(N0.getScalarValueSizeInBits() - 1, VT, DL));
// fold (mulhs x, undef) -> 0
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// If the type twice as wide is legal, transform the mulhs to a wider multiply
@@ -5254,7 +5254,7 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
return DAG.getConstant(0, DL, VT);
// fold (mulhu x, undef) -> 0
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// fold (mulhu x, (1 << c)) -> x >> (bitwidth - c)
@@ -5318,9 +5318,9 @@ SDValue DAGCombiner::visitAVG(SDNode *N) {
return FoldedVOp;
// fold (avg x, undef) -> x
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return N1;
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return N0;
// fold (avg x, x) --> x
@@ -5413,7 +5413,7 @@ SDValue DAGCombiner::visitABD(SDNode *N) {
return FoldedVOp;
// fold (abd x, undef) -> 0
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// fold (abd x, x) -> 0
@@ -5840,8 +5840,9 @@ SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
// 2. The saturation pattern is broken by canonicalization in InstCombine.
bool IsOpIllegal = !TLI.isOperationLegal(Opcode, VT);
bool IsSatBroken = Opcode == ISD::UMIN && N0.getOpcode() == ISD::SMAX;
- if ((IsSatBroken || IsOpIllegal) && (N0.isUndef() || DAG.SignBitIsZero(N0)) &&
- (N1.isUndef() || DAG.SignBitIsZero(N1))) {
+ if ((IsSatBroken || IsOpIllegal) &&
+ (N0.isUndefOrPoison() || DAG.SignBitIsZero(N0)) &&
+ (N1.isUndefOrPoison() || DAG.SignBitIsZero(N1))) {
unsigned AltOpcode;
switch (Opcode) {
case ISD::SMIN: AltOpcode = ISD::UMIN; break;
@@ -6043,7 +6044,7 @@ SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {
// Don't try to fold this node if it requires introducing a
// build vector of all zeros that might be illegal at this stage.
SDValue ShOp = N0.getOperand(1);
- if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
+ if (LogicOpcode == ISD::XOR && !ShOp.isUndefOrPoison())
ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
// (logic_op (shuf (A, C), shuf (B, C))) --> shuf (logic_op (A, B), C)
@@ -6056,7 +6057,7 @@ SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {
// Don't try to fold this node if it requires introducing a
// build vector of all zeros that might be illegal at this stage.
ShOp = N0.getOperand(0);
- if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
+ if (LogicOpcode == ISD::XOR && !ShOp.isUndefOrPoison())
ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
// (logic_op (shuf (C, A), shuf (C, B))) --> shuf (C, logic_op (A, B))
@@ -6502,7 +6503,7 @@ SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {
SDLoc DL(N);
// fold (and x, undef) -> 0
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL))
@@ -7851,7 +7852,7 @@ SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, const SDLoc &DL) {
EVT VT = N1.getValueType();
// fold (or x, undef) -> -1
- if (!LegalOperations && (N0.isUndef() || N1.isUndef()))
+ if (!LegalOperations && (N0.isUndefOrPoison() || N1.isUndefOrPoison()))
return DAG.getAllOnesConstant(DL, VT);
if (SDValue V = foldLogicOfSetCCs(false, N0, N1, DL))
@@ -9551,13 +9552,13 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
SDLoc DL(N);
// fold (xor undef, undef) -> 0. This is a common idiom (misuse).
- if (N0.isUndef() && N1.isUndef())
+ if (N0.isUndefOrPoison() && N1.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// fold (xor x, undef) -> undef
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return N0;
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return N1;
// fold (xor c1, c2) -> c1^c2
@@ -10952,7 +10953,7 @@ SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
return IsFSHL ? N0 : N1;
auto IsUndefOrZero = [](SDValue V) {
- return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
+ return V.isUndefOrPoison() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
};
// TODO - support non-uniform vector shift amounts.
@@ -12075,7 +12076,7 @@ static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
// length of the BV and see if all the non-undef nodes are the same.
ConstantSDNode *BottomHalf = nullptr;
for (int i = 0; i < NumElems / 2; ++i) {
- if (Cond->getOperand(i)->isUndef())
+ if (Cond->getOperand(i)->isUndefOrPoison())
continue;
if (BottomHalf == nullptr)
@@ -12087,7 +12088,7 @@ static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
// Do the same for the second half of the BuildVector
ConstantSDNode *TopHalf = nullptr;
for (int i = NumElems / 2; i < NumElems; ++i) {
- if (Cond->getOperand(i)->isUndef())
+ if (Cond->getOperand(i)->isUndefOrPoison())
continue;
if (TopHalf == nullptr)
@@ -12251,7 +12252,7 @@ SDValue DAGCombiner::visitMSTORE(SDNode *N) {
if (MaskedStoreSDNode *MST1 = dyn_cast<MaskedStoreSDNode>(Chain)) {
if (MST->isUnindexed() && MST->isSimple() && MST1->isUnindexed() &&
MST1->isSimple() && MST1->getBasePtr() == Ptr &&
- !MST->getBasePtr().isUndef() &&
+ !MST->getBasePtr().isUndefOrPoison() &&
((Mask == MST1->getMask() && MST->getMemoryVT().getStoreSize() ==
MST1->getMemoryVT().getStoreSize()) ||
ISD::isConstantSplatVectorAllOnes(Mask.getNode())) &&
@@ -12339,13 +12340,13 @@ SDValue DAGCombiner::visitVECTOR_COMPRESS(SDNode *N) {
SDValue Passthru = N->getOperand(2);
EVT VecVT = Vec.getValueType();
- bool HasPassthru = !Passthru.isUndef();
+ bool HasPassthru = !Passthru.isUndefOrPoison();
APInt SplatVal;
if (ISD::isConstantSplatVector(Mask.getNode(), SplatVal))
return TLI.isConstTrueVal(Mask) ? Vec : Passthru;
- if (Vec.isUndef() || Mask.isUndef())
+ if (Vec.isUndefOrPoison() || Mask.isUndefOrPoison())
return Passthru;
// No need for potentially expensive compress if the mask is constant.
@@ -12357,7 +12358,7 @@ SDValue DAGCombiner::visitVECTOR_COMPRESS(SDNode *N) {
for (unsigned I = 0; I < NumElmts; ++I) {
SDValue MaskI = Mask.getOperand(I);
// We treat undef mask entries as "false".
- if (MaskI.isUndef())
+ if (MaskI.isUndefOrPoison())
continue;
if (TLI.isConstTrueVal(MaskI)) {
@@ -12535,7 +12536,7 @@ SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
for (unsigned i = 0; i != Elts; ++i) {
SDValue N1Elt = N1.getOperand(i);
SDValue N2Elt = N2.getOperand(i);
- if (N1Elt.isUndef() || N2Elt.isUndef())
+ if (N1Elt.isUndefOrPoison() || N2Elt.isUndefOrPoison())
continue;
if (N1Elt.getValueType() != N2Elt.getValueType()) {
AllAddOne = false;
@@ -12897,7 +12898,7 @@ SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
// When the condition is UNDEF, just return the first operand. This is
// coherent the DAG creation, no setcc node is created in this case
- if (SCC->isUndef())
+ if (SCC->isUndefOrPoison())
return N2;
// Fold to a simpler select_cc
@@ -13204,7 +13205,7 @@ static SDValue tryToFoldExtendOfConstant(SDNode *N, const SDLoc &DL,
for (unsigned i = 0; i != NumElts; ++i) {
SDValue Op = N0.getOperand(i);
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
if (Opcode == ISD::ANY_EXTEND || Opcode == ISD::ANY_EXTEND_VECTOR_INREG)
Elts.push_back(DAG.getUNDEF(SVT));
else
@@ -13850,7 +13851,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
return FoldedVOp;
// sext(undef) = 0 because the top bit will all be the same.
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
if (SDValue Res = tryToFoldExtendOfConstant(N, DL, TLI, DAG, LegalTypes))
@@ -14125,7 +14126,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
return FoldedVOp;
// zext(undef) = 0
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
if (SDValue Res = tryToFoldExtendOfConstant(N, DL, TLI, DAG, LegalTypes))
@@ -14450,7 +14451,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
SDLoc DL(N);
// aext(undef) = undef
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getUNDEF(VT);
if (SDValue Res = tryToFoldExtendOfConstant(N, DL, TLI, DAG, LegalTypes))
@@ -14956,7 +14957,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
SDLoc DL(N);
// sext_vector_inreg(undef) = 0 because the top bit will all be the same.
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// fold (sext_in_reg c1) -> c1
@@ -15176,7 +15177,7 @@ SDValue DAGCombiner::visitEXTEND_VECTOR_INREG(SDNode *N) {
EVT VT = N->getValueType(0);
SDLoc DL(N);
- if (N0.isUndef()) {
+ if (N0.isUndefOrPoison()) {
// aext_vector_inreg(undef) = undef because the top bits are undefined.
// {s/z}ext_vector_inreg(undef) = 0 because the top bits must be the same.
return N->getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG
@@ -15322,7 +15323,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDLoc DL(N);
// trunc(undef) = undef
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getUNDEF(VT);
// fold (truncate (truncate x)) -> (truncate x)
@@ -15542,7 +15543,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
SDValue X = N0.getOperand(i);
- if (!X.isUndef()) {
+ if (!X.isUndefOrPoison()) {
V = X;
Idx = i;
NumDefs++;
@@ -15788,7 +15789,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getUNDEF(VT);
// If the input is a BUILD_VECTOR with all constant elements, fold this now.
@@ -16025,7 +16026,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
if (Op.getOpcode() == ISD::BITCAST &&
Op.getOperand(0).getValueType() == VT)
return SDValue(Op.getOperand(0));
- if (Op.isUndef() || isAnyConstantBuildVector(Op))
+ if (Op.isUndefOrPoison() || isAnyConstantBuildVector(Op))
return DAG.getBitcast(VT, Op);
return SDValue();
};
@@ -16105,8 +16106,9 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
if (llvm::ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
SmallVector<SDValue, 8> NewVecC;
for (const SDValue &Op : N0->op_values())
- NewVecC.push_back(
- Op.isUndef() ? DAG.getConstant(0, DL, Op.getValueType()) : Op);
+ NewVecC.push_back(Op.isUndefOrPoison()
+ ? DAG.getConstant(0, DL, Op.getValueType())
+ : Op);
return DAG.getBuildVector(VT, DL, NewVecC);
}
}
@@ -16145,7 +16147,7 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
// also recursively replace t184 by t150.
SDValue MaybePoisonOperand = N->getOperand(0).getOperand(OpNo);
// Don't replace every single UNDEF everywhere with frozen UNDEF, though.
- if (MaybePoisonOperand.getOpcode() == ISD::UNDEF)
+ if (MaybePoisonOperand.isUndefOrPoison())
continue;
// First, freeze each offending operand.
SDValue FrozenMaybePoisonOperand = DAG.getFreeze(MaybePoisonOperand);
@@ -16171,9 +16173,10 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
// Finally, recreate the node, it's operands were updated to use
// frozen operands, so we just need to use it's "original" operands.
SmallVector<SDValue> Ops(N0->ops());
- // Special-handle ISD::UNDEF, each single one of them can be it's own thing.
+ // Special-handle ISD::UNDEF and ISD::POISON: each single one of them can
+ // be its own thing.
for (SDValue &Op : Ops) {
- if (Op.getOpcode() == ISD::UNDEF)
+ if (Op.isUndefOrPoison())
Op = DAG.getFreeze(Op);
}
@@ -18136,7 +18139,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
SDLoc DL(N);
// [us]itofp(undef) = 0, because the result value is bounded.
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getConstantFP(0.0, DL, VT);
// fold (sint_to_fp c1) -> c1fp
@@ -18184,7 +18187,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
SDLoc DL(N);
// [us]itofp(undef) = 0, because the result value is bounded.
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getConstantFP(0.0, DL, VT);
// fold (uint_to_fp c1) -> c1fp
@@ -18262,7 +18265,7 @@ SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
SDLoc DL(N);
// fold (fp_to_sint undef) -> undef
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getUNDEF(VT);
// fold (fp_to_sint c1fp) -> c1
@@ -18278,7 +18281,7 @@ SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
SDLoc DL(N);
// fold (fp_to_uint undef) -> undef
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getUNDEF(VT);
// fold (fp_to_uint c1fp) -> c1
@@ -18294,7 +18297,7 @@ SDValue DAGCombiner::visitXROUND(SDNode *N) {
// fold (lrint|llrint undef) -> undef
// fold (lround|llround undef) -> undef
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getUNDEF(VT);
// fold (lrint|llrint c1fp) -> c1
@@ -19436,7 +19439,7 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
}
// TODO: Deal with nonzero offset.
- if (LD->getBasePtr().isUndef() || Offset != 0)
+ if (LD->getBasePtr().isUndefOrPoison() || Offset != 0)
return SDValue();
// Model necessary truncations / extenstions.
// Truncate Value To Stored Memory Size.
@@ -19751,7 +19754,7 @@ struct LoadedSlice {
return false;
// Offsets are for indexed load only, we do not handle that.
- if (!Origin->getOffset().isUndef())
+ if (!Origin->getOffset().isUndefOrPoison())
return false;
const TargetLowering &TLI = DAG->getTargetLoweringInfo();
@@ -20817,7 +20820,7 @@ DAGCombiner::getStoreMergeCandidates(StoreSDNode *St,
// pointer. We must have a base and an offset. Do not handle stores to undef
// base pointers.
BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
- if (!BasePtr.getBase().getNode() || BasePtr.getBase().isUndef())
+ if (!BasePtr.getBase().getNode() || BasePtr.getBase().isUndefOrPoison())
return nullptr;
SDValue Val = peekThroughBitcasts(St->getValue());
@@ -21878,7 +21881,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
}
// Turn 'store undef, Ptr' -> nothing.
- if (Value.isUndef() && ST->isUnindexed() && !ST->isVolatile())
+ if (Value.isUndefOrPoison() && ST->isUnindexed() && !ST->isVolatile())
return Chain;
// Try to infer better alignment information than the store already has.
@@ -22008,7 +22011,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
}
if (OptLevel != CodeGenOptLevel::None && ST1->hasOneUse() &&
- !ST1->getBasePtr().isUndef() &&
+ !ST1->getBasePtr().isUndefOrPoison() &&
ST->getAddressSpace() == ST1->getAddressSpace()) {
// If we consider two stores and one smaller in size is a scalable
// vector type and another one a bigger size store with a fixed type,
@@ -22303,7 +22306,7 @@ static bool mergeEltWithShuffle(SDValue &X, SDValue &Y, ArrayRef<int> Mask,
// If we failed to find a match, see if we can replace an UNDEF shuffle
// operand.
if (ElementOffset == -1) {
- if (!Y.isUndef() || InsertVal0.getValueType() != Y.getValueType())
+ if (!Y.isUndefOrPoison() || InsertVal0.getValueType() != Y.getValueType())
return false;
ElementOffset = Mask.size();
Y = InsertVal0;
@@ -22523,7 +22526,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
if (!IndexC) {
// If this is variable insert to undef vector, it might be better to splat:
// inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
- if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT))
+ if (InVec.isUndefOrPoison() && TLI.shouldSplatInsEltVarIndex(VT))
return DAG.getSplat(VT, DL, InVal);
return SDValue();
}
@@ -22612,7 +22615,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
// Recurse up a INSERT_VECTOR_ELT chain to build a BUILD_VECTOR.
for (SDValue CurVec = InVec; CurVec;) {
// UNDEF - build new BUILD_VECTOR from already inserted operands.
- if (CurVec.isUndef())
+ if (CurVec.isUndefOrPoison())
return CanonicalizeBuildVector(Ops);
// BUILD_VECTOR - insert unused operands and build new BUILD_VECTOR.
@@ -22936,7 +22939,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
SDValue Index = N->getOperand(1);
EVT ScalarVT = N->getValueType(0);
EVT VecVT = VecOp.getValueType();
- if (VecOp.isUndef())
+ if (VecOp.isUndefOrPoison())
return DAG.getUNDEF(ScalarVT);
// extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val
@@ -23315,7 +23318,8 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
for (unsigned i = 0; i != NumInScalars; ++i) {
SDValue In = N->getOperand(i);
// Ignore undef inputs.
- if (In.isUndef()) continue;
+ if (In.isUndefOrPoison())
+ continue;
bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
@@ -23375,10 +23379,10 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDValue Cast = N->getOperand(i);
assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
- Cast.getOpcode() == ISD::ZERO_EXTEND ||
- Cast.isUndef()) && "Invalid cast opcode");
+ Cast.getOpcode() == ISD::ZERO_EXTEND || Cast.isUndefOrPoison()) &&
+ "Invalid cast opcode");
SDValue In;
- if (Cast.isUndef())
+ if (Cast.isUndefOrPoison())
In = DAG.getUNDEF(SourceType);
else
In = Cast->getOperand(0);
@@ -23448,7 +23452,8 @@ SDValue DAGCombiner::reduceBuildVecTruncToBitCast(SDNode *N) {
for (unsigned i = 0; i != NumInScalars; ++i) {
SDValue In = PeekThroughBitcast(N->getOperand(i));
// Ignore undef inputs.
- if (In.isUndef()) continue;
+ if (In.isUndefOrPoison())
+ continue;
if (In.getOpcode() != ISD::TRUNCATE)
return SDValue();
@@ -23643,7 +23648,7 @@ static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG) {
int ZextElt = -1;
for (int i = 0; i != NumBVOps; ++i) {
SDValue Op = BV->getOperand(i);
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
if (ZextElt == -1)
ZextElt = i;
@@ -23760,7 +23765,7 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
for (unsigned i = 0; i != NumElems; ++i) {
SDValue Op = N->getOperand(i);
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
// See if we can use a blend with a zero vector.
@@ -23969,7 +23974,7 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
SDValue L = Shuffles[Left];
ArrayRef<int> LMask;
bool IsLeftShuffle = L.getOpcode() == ISD::VECTOR_SHUFFLE &&
- L.use_empty() && L.getOperand(1).isUndef() &&
+ L.use_empty() && L.getOperand(1).isUndefOrPoison() &&
L.getOperand(0).getValueType() == L.getValueType();
if (IsLeftShuffle) {
LMask = cast<ShuffleVectorSDNode>(L.getNode())->getMask();
@@ -23978,7 +23983,8 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
SDValue R = Shuffles[Right];
ArrayRef<int> RMask;
bool IsRightShuffle = R.getOpcode() == ISD::VECTOR_SHUFFLE &&
- R.use_empty() && R.getOperand(1).isUndef() &&
+ R.use_empty() &&
+ R.getOperand(1).isUndefOrPoison() &&
R.getOperand(0).getValueType() == R.getValueType();
if (IsRightShuffle) {
RMask = cast<ShuffleVectorSDNode>(R.getNode())->getMask();
@@ -24161,7 +24167,8 @@ SDValue DAGCombiner::convertBuildVecZextToBuildVecWithZeros(SDNode *N) {
NewOps.reserve(NewIntVT.getVectorNumElements());
for (auto I : enumerate(N->ops())) {
SDValue Op = I.value();
- assert(!Op.isUndef() && "FIXME: after allowing UNDEF's, handle them here.");
+ assert(!Op.isUndefOrPoison() &&
+ "FIXME: after allowing UNDEF's, handle them here.");
unsigned SrcOpIdx = I.index();
if (KnownZeroOps[SrcOpIdx]) {
NewOps.append(*Factor, ZeroOp);
@@ -24262,7 +24269,8 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
// Do this late as some of the above may replace the splat.
if (TLI.getOperationAction(ISD::SPLAT_VECTOR, VT) != TargetLowering::Expand)
if (SDValue V = cast<BuildVectorSDNode>(N)->getSplatValue()) {
- assert(!V.isUndef() && "Splat of undef should have been handled earlier");
+ assert(!V.isUndefOrPoison() &&
+ "Splat of undef should have been handled earlier");
return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V);
}
@@ -24289,7 +24297,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
if (ISD::BITCAST == Op.getOpcode() &&
!Op.getOperand(0).getValueType().isVector())
Ops.push_back(Op.getOperand(0));
- else if (ISD::UNDEF == Op.getOpcode())
+ else if (Op.isUndefOrPoison())
Ops.push_back(DAG.getNode(ISD::UNDEF, DL, SVT));
else
return SDValue();
@@ -24311,7 +24319,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
for (SDValue &Op : Ops) {
if (Op.getValueType() == SVT)
continue;
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
Op = DAG.getNode(ISD::UNDEF, DL, SVT);
else
Op = DAG.getBitcast(SVT, Op);
@@ -24334,7 +24342,7 @@ static SDValue combineConcatVectorOfConcatVectors(SDNode *N,
EVT SubVT;
SDValue FirstConcat;
for (const SDValue &Op : N->ops()) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
if (Op.getOpcode() != ISD::CONCAT_VECTORS)
return SDValue();
@@ -24352,7 +24360,7 @@ static SDValue combineConcatVectorOfConcatVectors(SDNode *N,
SmallVector<SDValue> ConcatOps;
for (const SDValue &Op : N->ops()) {
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
ConcatOps.append(FirstConcat->getNumOperands(), DAG.getUNDEF(SubVT));
continue;
}
@@ -24383,7 +24391,7 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
Op = peekThroughBitcasts(Op);
// UNDEF nodes convert to UNDEF shuffle mask values.
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
Mask.append((unsigned)NumOpElts, -1);
continue;
}
@@ -24401,7 +24409,7 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
ExtVec = peekThroughBitcasts(ExtVec);
// UNDEF nodes convert to UNDEF shuffle mask values.
- if (ExtVec.isUndef()) {
+ if (ExtVec.isUndefOrPoison()) {
Mask.append((unsigned)NumOpElts, -1);
continue;
}
@@ -24421,11 +24429,11 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
return SDValue();
// At most we can reference 2 inputs in the final shuffle.
- if (SV0.isUndef() || SV0 == ExtVec) {
+ if (SV0.isUndefOrPoison() || SV0 == ExtVec) {
SV0 = ExtVec;
for (int i = 0; i != NumOpElts; ++i)
Mask.push_back(i + ExtIdx);
- } else if (SV1.isUndef() || SV1 == ExtVec) {
+ } else if (SV1.isUndefOrPoison() || SV1 == ExtVec) {
SV1 = ExtVec;
for (int i = 0; i != NumOpElts; ++i)
Mask.push_back(i + ExtIdx + NumElts);
@@ -24531,10 +24539,10 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
ShuffleVectorSDNode *SVN = nullptr;
for (SDValue Op : N->ops()) {
if (auto *CurSVN = dyn_cast<ShuffleVectorSDNode>(Op);
- CurSVN && CurSVN->getOperand(1).isUndef() && N->isOnlyUserOf(CurSVN) &&
- all_of(N->ops(), [CurSVN](SDValue Op) {
+ CurSVN && CurSVN->getOperand(1).isUndefOrPoison() &&
+ N->isOnlyUserOf(CurSVN) && all_of(N->ops(), [CurSVN](SDValue Op) {
// FIXME: can we allow UNDEF operands?
- return !Op.isUndef() &&
+ return !Op.isUndefOrPoison() &&
(Op.getNode() == CurSVN || is_contained(CurSVN->ops(), Op));
})) {
SVN = CurSVN;
@@ -24548,7 +24556,7 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
// from the second operand, must be adjusted.
SmallVector<int, 16> AdjustedMask;
AdjustedMask.reserve(SVN->getMask().size());
- assert(SVN->getOperand(1).isUndef() && "Expected unary shuffle!");
+ assert(SVN->getOperand(1).isUndefOrPoison() && "Expected unary shuffle!");
append_range(AdjustedMask, SVN->getMask());
// Identity masks for the operands of the (padded) shuffle.
@@ -24566,7 +24574,7 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
SmallVector<int, 32> Mask;
Mask.reserve(VT.getVectorNumElements());
for (SDValue Op : N->ops()) {
- assert(!Op.isUndef() && "Not expecting to concatenate UNDEF.");
+ assert(!Op.isUndefOrPoison() && "Not expecting to concatenate UNDEF.");
if (Op.getNode() == SVN) {
append_range(Mask, AdjustedMask);
continue;
@@ -24592,7 +24600,7 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
for (auto I : zip(SVN->ops(), ShufOps)) {
SDValue ShufOp = std::get<0>(I);
SDValue &NewShufOp = std::get<1>(I);
- if (ShufOp.isUndef())
+ if (ShufOp.isUndefOrPoison())
NewShufOp = DAG.getUNDEF(VT);
else {
SmallVector<SDValue, 2> ShufOpParts(N->getNumOperands(),
@@ -24617,7 +24625,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
// Optimize concat_vectors where all but the first of the vectors are undef.
if (all_of(drop_begin(N->ops()),
- [](const SDValue &Op) { return Op.isUndef(); })) {
+ [](const SDValue &Op) { return Op.isUndefOrPoison(); })) {
SDValue In = N->getOperand(0);
assert(In.getValueType().isVector() && "Must concat vectors");
@@ -24684,7 +24692,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
// fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
// -> (BUILD_VECTOR A, B, ..., C, D, ...)
auto IsBuildVectorOrUndef = [](const SDValue &Op) {
- return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
+ return Op.isUndefOrPoison() || ISD::BUILD_VECTOR == Op.getOpcode();
};
if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
SmallVector<SDValue, 8> Opnds;
@@ -24708,7 +24716,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
EVT OpVT = Op.getValueType();
unsigned NumElts = OpVT.getVectorNumElements();
- if (ISD::UNDEF == Op.getOpcode())
+ if (Op.isUndefOrPoison())
Opnds.append(NumElts, DAG.getUNDEF(MinVT));
if (ISD::BUILD_VECTOR == Op.getOpcode()) {
@@ -24763,7 +24771,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDValue Op = N->getOperand(i);
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
// Check if this is the identity extract:
@@ -25122,7 +25130,7 @@ static SDValue foldExtractSubvectorFromShuffleVector(SDNode *N,
SDValue Op = WideShuffleVector->getOperand(WideShufOpIdx);
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
// Picking from an undef operand. Let's adjust mask instead.
NewMask.emplace_back(-1);
continue;
@@ -25201,7 +25209,7 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
SDLoc DL(N);
// Extract from UNDEF is UNDEF.
- if (V.isUndef())
+ if (V.isUndefOrPoison())
return DAG.getUNDEF(NVT);
if (TLI.isOperationLegalOrCustomOrPromote(ISD::LOAD, NVT))
@@ -25411,7 +25419,8 @@ static SDValue foldShuffleOfConcatUndefs(ShuffleVectorSDNode *Shuf,
SDValue N0 = Shuf->getOperand(0), N1 = Shuf->getOperand(1);
if (N0.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
N1.getOpcode() != ISD::CONCAT_VECTORS || N1.getNumOperands() != 2 ||
- !N0.getOperand(1).isUndef() || !N1.getOperand(1).isUndef())
+ !N0.getOperand(1).isUndefOrPoison() ||
+ !N1.getOperand(1).isUndefOrPoison())
return SDValue();
// Split the wide shuffle mask into halves. Any mask element that is accessing
@@ -25473,7 +25482,7 @@ static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
// Special case: shuffle(concat(A,B)) can be more efficiently represented
// as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
// half vector elements.
- if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() &&
+ if (NumElemsPerConcat * 2 == NumElts && N1.isUndefOrPoison() &&
llvm::all_of(Mask.slice(NumElemsPerConcat, NumElemsPerConcat),
IsUndefMaskElt)) {
N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0),
@@ -25546,7 +25555,7 @@ static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
// If only one of N1,N2 is constant, bail out if it is not ALL_ZEROS as
// discussed above.
- if (!N1.isUndef()) {
+ if (!N1.isUndefOrPoison()) {
if (!N1->hasOneUse())
return SDValue();
@@ -25589,7 +25598,7 @@ static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
// generating a splat; semantically, this is fine, but it's likely to
// generate low-quality code if the target can't reconstruct an appropriate
// shuffle.
- if (!Op.isUndef() && !isIntOrFPConstant(Op))
+ if (!Op.isUndefOrPoison() && !isIntOrFPConstant(Op))
if (!IsSplat && !DuplicateOps.insert(Op).second)
return SDValue();
@@ -25604,10 +25613,11 @@ static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
if (SVT != VT.getScalarType())
for (SDValue &Op : Ops)
- Op = Op.isUndef() ? DAG.getUNDEF(SVT)
- : (TLI.isZExtFree(Op.getValueType(), SVT)
- ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
- : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT));
+ Op = Op.isUndefOrPoison()
+ ? DAG.getUNDEF(SVT)
+ : (TLI.isZExtFree(Op.getValueType(), SVT)
+ ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
+ : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT));
return DAG.getBuildVector(VT, SDLoc(SVN), Ops);
}
@@ -25882,7 +25892,7 @@ static SDValue combineShuffleOfSplatVal(ShuffleVectorSDNode *Shuf,
EVT VT = Shuf->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
- if (!Shuf->getOperand(1).isUndef())
+ if (!Shuf->getOperand(1).isUndefOrPoison())
return SDValue();
// See if this unary non-splat shuffle actually *is* a splat shuffle,
@@ -25989,11 +25999,11 @@ static SDValue combineShuffleOfBitcast(ShuffleVectorSDNode *SVN,
return SDValue();
EVT InVT = Op0.getOperand(0).getValueType();
if (!InVT.isVector() ||
- (!Op1.isUndef() && (Op1.getOpcode() != ISD::BITCAST ||
- Op1.getOperand(0).getValueType() != InVT)))
+ (!Op1.isUndefOrPoison() && (Op1.getOpcode() != ISD::BITCAST ||
+ Op1.getOperand(0).getValueType() != InVT)))
return SDValue();
if (isAnyConstantBuildVector(Op0.getOperand(0)) &&
- (Op1.isUndef() || isAnyConstantBuildVector(Op1.getOperand(0))))
+ (Op1.isUndefOrPoison() || isAnyConstantBuildVector(Op1.getOperand(0))))
return SDValue();
int VTLanes = VT.getVectorNumElements();
@@ -26018,7 +26028,7 @@ static SDValue combineShuffleOfBitcast(ShuffleVectorSDNode *SVN,
// original type.
SDLoc DL(SVN);
Op0 = Op0.getOperand(0);
- Op1 = Op1.isUndef() ? DAG.getUNDEF(InVT) : Op1.getOperand(0);
+ Op1 = Op1.isUndefOrPoison() ? DAG.getUNDEF(InVT) : Op1.getOperand(0);
SDValue NewShuf = DAG.getVectorShuffle(InVT, DL, Op0, Op1, NewMask);
return DAG.getBitcast(VT, NewShuf);
}
@@ -26027,10 +26037,10 @@ static SDValue combineShuffleOfBitcast(ShuffleVectorSDNode *SVN,
/// shuf (shuf X, undef, InnerMask), undef, OuterMask --> splat X
static SDValue formSplatFromShuffles(ShuffleVectorSDNode *OuterShuf,
SelectionDAG &DAG) {
- if (!OuterShuf->getOperand(1).isUndef())
+ if (!OuterShuf->getOperand(1).isUndefOrPoison())
return SDValue();
auto *InnerShuf = dyn_cast<ShuffleVectorSDNode>(OuterShuf->getOperand(0));
- if (!InnerShuf || !InnerShuf->getOperand(1).isUndef())
+ if (!InnerShuf || !InnerShuf->getOperand(1).isUndefOrPoison())
return SDValue();
ArrayRef<int> OuterMask = OuterShuf->getMask();
@@ -26159,7 +26169,7 @@ static SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf,
static SDValue simplifyShuffleOfShuffle(ShuffleVectorSDNode *Shuf) {
// shuf (shuf0 X, Y, Mask0), undef, Mask
auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Shuf->getOperand(0));
- if (!Shuf0 || !Shuf->getOperand(1).isUndef())
+ if (!Shuf0 || !Shuf->getOperand(1).isUndefOrPoison())
return SDValue();
ArrayRef<int> Mask = Shuf->getMask();
@@ -26190,7 +26200,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");
// Canonicalize shuffle undef, undef -> undef
- if (N0.isUndef() && N1.isUndef())
+ if (N0.isUndefOrPoison() && N1.isUndefOrPoison())
return DAG.getUNDEF(VT);
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
@@ -26201,11 +26211,11 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
createUnaryMask(SVN->getMask(), NumElts));
// Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getCommutedVectorShuffle(*SVN);
// Remove references to rhs if it is undef
- if (N1.isUndef()) {
+ if (N1.isUndefOrPoison()) {
bool Changed = false;
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumElts; ++i) {
@@ -26298,7 +26308,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
SDValue Base;
bool AllSame = true;
for (unsigned i = 0; i != NumElts; ++i) {
- if (!V->getOperand(i).isUndef()) {
+ if (!V->getOperand(i).isUndefOrPoison()) {
Base = V->getOperand(i);
break;
}
@@ -26355,11 +26365,10 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
if (SDValue V = combineTruncationShuffle(SVN, DAG))
return V;
- if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
- Level < AfterLegalizeVectorOps &&
- (N1.isUndef() ||
- (N1.getOpcode() == ISD::CONCAT_VECTORS &&
- N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
+ if (N0.getOpcode() == ISD::CONCAT_VECTORS && Level < AfterLegalizeVectorOps &&
+ (N1.isUndefOrPoison() ||
+ (N1.getOpcode() == ISD::CONCAT_VECTORS &&
+ N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
if (SDValue V = partitionShuffleOfConcats(N, DAG))
return V;
}
@@ -26367,9 +26376,8 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// A shuffle of a concat of the same narrow vector can be reduced to use
// only low-half elements of a concat with undef:
// shuf (concat X, X), undef, Mask --> shuf (concat X, undef), undef, Mask'
- if (N0.getOpcode() == ISD::CONCAT_VECTORS && N1.isUndef() &&
- N0.getNumOperands() == 2 &&
- N0.getOperand(0) == N0.getOperand(1)) {
+ if (N0.getOpcode() == ISD::CONCAT_VECTORS && N1.isUndefOrPoison() &&
+ N0.getNumOperands() == 2 && N0.getOperand(0) == N0.getOperand(1)) {
int HalfNumElts = (int)NumElts / 2;
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumElts; ++i) {
@@ -26516,7 +26524,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// attempt to merge the 2 shuffles and suitably bitcast the inputs/output
// back to their original types.
if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
- N1.isUndef() && Level < AfterLegalizeVectorOps &&
+ N1.isUndefOrPoison() && Level < AfterLegalizeVectorOps &&
TLI.isTypeLegal(VT)) {
SDValue BC0 = peekThroughOneUseBitcasts(N0);
@@ -26618,7 +26626,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
}
// Simple case where 'CurrentVec' is UNDEF.
- if (CurrentVec.isUndef()) {
+ if (CurrentVec.isUndefOrPoison()) {
Mask.push_back(-1);
continue;
}
@@ -26652,7 +26660,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
SDValue InnerVec = (InnerIdx < (int)NumElts)
? CurrentSVN->getOperand(0)
: CurrentSVN->getOperand(1);
- if (InnerVec.isUndef()) {
+ if (InnerVec.isUndefOrPoison()) {
Mask.push_back(-1);
continue;
}
@@ -26704,7 +26712,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
SDValue SV0 = N1->getOperand(0);
SDValue SV1 = N1->getOperand(1);
bool HasSameOp0 = N0 == SV0;
- bool IsSV1Undef = SV1.isUndef();
+ bool IsSV1Undef = SV1.isUndefOrPoison();
if (HasSameOp0 || IsSV1Undef || N0 == SV1)
// Commute the operands of this shuffle so merging below will trigger.
return DAG.getCommutedVectorShuffle(*SVN);
@@ -26756,13 +26764,13 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// shuffle(bop(shuffle(x,y),shuffle(z,w)),bop(shuffle(a,b),shuffle(c,d)))
unsigned SrcOpcode = N0.getOpcode();
if (TLI.isBinOp(SrcOpcode) && N->isOnlyUserOf(N0.getNode()) &&
- (N1.isUndef() ||
+ (N1.isUndefOrPoison() ||
(SrcOpcode == N1.getOpcode() && N->isOnlyUserOf(N1.getNode())))) {
// Get binop source ops, or just pass on the undef.
SDValue Op00 = N0.getOperand(0);
SDValue Op01 = N0.getOperand(1);
- SDValue Op10 = N1.isUndef() ? N1 : N1.getOperand(0);
- SDValue Op11 = N1.isUndef() ? N1 : N1.getOperand(1);
+ SDValue Op10 = N1.isUndefOrPoison() ? N1 : N1.getOperand(0);
+ SDValue Op11 = N1.isUndefOrPoison() ? N1 : N1.getOperand(1);
// TODO: We might be able to relax the VT check but we don't currently
// have any isBinOp() that has different result/ops VTs so play safe until
// we have test coverage.
@@ -26943,13 +26951,13 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
uint64_t InsIdx = N->getConstantOperandVal(2);
// If inserting an UNDEF, just return the original vector.
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return N0;
// If this is an insert of an extracted vector into an undef vector, we can
// just use the input to the extract if the types match, and can simplify
// in some cases even if they don't.
- if (N0.isUndef() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+ if (N0.isUndefOrPoison() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N1.getOperand(1) == N2) {
EVT SrcVT = N1.getOperand(0).getValueType();
if (SrcVT == VT)
@@ -26977,7 +26985,7 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
// Simplify scalar inserts into an undef vector:
// insert_subvector undef, (splat X), N2 -> splat X
- if (N0.isUndef() && N1.getOpcode() == ISD::SPLAT_VECTOR)
+ if (N0.isUndefOrPoison() && N1.getOpcode() == ISD::SPLAT_VECTOR)
if (DAG.isConstantValueOfAnyType(N1.getOperand(0)) || N1.hasOneUse())
return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, N1.getOperand(0));
@@ -26985,7 +26993,7 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
// number of elements, just use the bitcast input of the extract.
// i.e. INSERT_SUBVECTOR UNDEF (BITCAST N1) N2 ->
// BITCAST (INSERT_SUBVECTOR UNDEF N1 N2)
- if (N0.isUndef() && N1.getOpcode() == ISD::BITCAST &&
+ if (N0.isUndefOrPoison() && N1.getOpcode() == ISD::BITCAST &&
N1.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N1.getOperand(0).getOperand(1) == N2 &&
N1.getOperand(0).getOperand(0).getValueType().getVectorElementCount() ==
@@ -27025,8 +27033,8 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
// Eliminate an intermediate insert into an undef vector:
// insert_subvector undef, (insert_subvector undef, X, 0), 0 -->
// insert_subvector undef, X, 0
- if (N0.isUndef() && N1.getOpcode() == ISD::INSERT_SUBVECTOR &&
- N1.getOperand(0).isUndef() && isNullConstant(N1.getOperand(2)) &&
+ if (N0.isUndefOrPoison() && N1.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ N1.getOperand(0).isUndefOrPoison() && isNullConstant(N1.getOperand(2)) &&
isNullConstant(N2))
return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0,
N1.getOperand(1), N2);
@@ -27034,13 +27042,13 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
// Push subvector bitcasts to the output, adjusting the index as we go.
// insert_subvector(bitcast(v), bitcast(s), c1)
// -> bitcast(insert_subvector(v, s, c2))
- if ((N0.isUndef() || N0.getOpcode() == ISD::BITCAST) &&
+ if ((N0.isUndefOrPoison() || N0.getOpcode() == ISD::BITCAST) &&
N1.getOpcode() == ISD::BITCAST) {
SDValue N0Src = peekThroughBitcasts(N0);
SDValue N1Src = peekThroughBitcasts(N1);
EVT N0SrcSVT = N0Src.getValueType().getScalarType();
EVT N1SrcSVT = N1Src.getValueType().getScalarType();
- if ((N0.isUndef() || N0SrcSVT == N1SrcSVT) &&
+ if ((N0.isUndefOrPoison() || N0SrcSVT == N1SrcSVT) &&
N0Src.getValueType().isVector() && N1Src.getValueType().isVector()) {
EVT NewVT;
SDLoc DL(N);
@@ -27186,9 +27194,9 @@ SDValue DAGCombiner::visitVECREDUCE(SDNode *N) {
SDValue Vec = N0.getOperand(0);
SDValue Subvec = N0.getOperand(1);
if ((Opcode == ISD::VECREDUCE_OR &&
- (N0.getOperand(0).isUndef() || isNullOrNullSplat(Vec))) ||
+ (N0.getOperand(0).isUndefOrPoison() || isNullOrNullSplat(Vec))) ||
(Opcode == ISD::VECREDUCE_AND &&
- (N0.getOperand(0).isUndef() || isAllOnesOrAllOnesSplat(Vec))))
+ (N0.getOperand(0).isUndefOrPoison() || isAllOnesOrAllOnesSplat(Vec))))
return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), Subvec);
}
@@ -27307,7 +27315,8 @@ SDValue DAGCombiner::visitGET_FPENV_MEM(SDNode *N) {
return SDValue();
}
if (!LdNode || !LdNode->isSimple() || LdNode->isIndexed() ||
- !LdNode->getOffset().isUndef() || LdNode->getMemoryVT() != MemVT ||
+ !LdNode->getOffset().isUndefOrPoison() ||
+ LdNode->getMemoryVT() != MemVT ||
!LdNode->getChain().reachesChainWithoutSideEffects(SDValue(N, 0)))
return SDValue();
@@ -27325,7 +27334,8 @@ SDValue DAGCombiner::visitGET_FPENV_MEM(SDNode *N) {
}
}
if (!StNode || !StNode->isSimple() || StNode->isIndexed() ||
- !StNode->getOffset().isUndef() || StNode->getMemoryVT() != MemVT ||
+ !StNode->getOffset().isUndefOrPoison() ||
+ StNode->getMemoryVT() != MemVT ||
!StNode->getChain().reachesChainWithoutSideEffects(SDValue(LdNode, 1)))
return SDValue();
@@ -27356,7 +27366,8 @@ SDValue DAGCombiner::visitSET_FPENV_MEM(SDNode *N) {
return SDValue();
}
if (!StNode || !StNode->isSimple() || StNode->isIndexed() ||
- !StNode->getOffset().isUndef() || StNode->getMemoryVT() != MemVT ||
+ !StNode->getOffset().isUndefOrPoison() ||
+ StNode->getMemoryVT() != MemVT ||
!Chain.reachesChainWithoutSideEffects(SDValue(StNode, 0)))
return SDValue();
@@ -27365,7 +27376,8 @@ SDValue DAGCombiner::visitSET_FPENV_MEM(SDNode *N) {
SDValue StValue = StNode->getValue();
auto *LdNode = dyn_cast<LoadSDNode>(StValue);
if (!LdNode || !LdNode->isSimple() || LdNode->isIndexed() ||
- !LdNode->getOffset().isUndef() || LdNode->getMemoryVT() != MemVT ||
+ !LdNode->getOffset().isUndefOrPoison() ||
+ LdNode->getMemoryVT() != MemVT ||
!StNode->getChain().reachesChainWithoutSideEffects(SDValue(LdNode, 1)))
return SDValue();
@@ -27414,7 +27426,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
SDValue Elt = RHS.getOperand(EltIdx);
// X & undef --> 0 (not undef). So this lane must be converted to choose
// from the zero constant vector (same as if the element had all 0-bits).
- if (Elt.isUndef()) {
+ if (Elt.isUndefOrPoison()) {
Indices.push_back(i + NumSubElts);
continue;
}
@@ -27582,7 +27594,8 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(LHS);
auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(RHS);
if (Shuf0 && Shuf1 && Shuf0->getMask().equals(Shuf1->getMask()) &&
- LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
+ LHS.getOperand(1).isUndefOrPoison() &&
+ RHS.getOperand(1).isUndefOrPoison() &&
(LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
SDValue NewBinOp = DAG.getNode(Opcode, DL, VT, LHS.getOperand(0),
RHS.getOperand(0), Flags);
@@ -27597,7 +27610,7 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
// of an inserted scalar because that may be optimized better by
// load-folding or other target-specific behaviors.
if (isConstOrConstSplat(RHS) && Shuf0 && all_equal(Shuf0->getMask()) &&
- Shuf0->hasOneUse() && Shuf0->getOperand(1).isUndef() &&
+ Shuf0->hasOneUse() && Shuf0->getOperand(1).isUndefOrPoison() &&
Shuf0->getOperand(0).getOpcode() != ISD::INSERT_VECTOR_ELT) {
// binop (splat X), (splat C) --> splat (binop X, C)
SDValue X = Shuf0->getOperand(0);
@@ -27606,7 +27619,7 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
Shuf0->getMask());
}
if (isConstOrConstSplat(LHS) && Shuf1 && all_equal(Shuf1->getMask()) &&
- Shuf1->hasOneUse() && Shuf1->getOperand(1).isUndef() &&
+ Shuf1->hasOneUse() && Shuf1->getOperand(1).isUndefOrPoison() &&
Shuf1->getOperand(0).getOpcode() != ISD::INSERT_VECTOR_ELT) {
// binop (splat C), (splat X) --> splat (binop C, X)
SDValue X = Shuf1->getOperand(0);
@@ -27620,8 +27633,10 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
// the binary operation ahead of insertion may allow using a narrower vector
// instruction that has better performance than the wide version of the op:
// VBinOp (ins undef, X, Z), (ins undef, Y, Z) --> ins VecC, (VBinOp X, Y), Z
- if (LHS.getOpcode() == ISD::INSERT_SUBVECTOR && LHS.getOperand(0).isUndef() &&
- RHS.getOpcode() == ISD::INSERT_SUBVECTOR && RHS.getOperand(0).isUndef() &&
+ if (LHS.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ LHS.getOperand(0).isUndefOrPoison() &&
+ RHS.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ RHS.getOperand(0).isUndefOrPoison() &&
LHS.getOperand(2) == RHS.getOperand(2) &&
(LHS.hasOneUse() || RHS.hasOneUse())) {
SDValue X = LHS.getOperand(1);
@@ -27643,7 +27658,7 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
auto ConcatWithConstantOrUndef = [](SDValue Concat) {
return Concat.getOpcode() == ISD::CONCAT_VECTORS &&
all_of(drop_begin(Concat->ops()), [](const SDValue &Op) {
- return Op.isUndef() ||
+ return Op.isUndefOrPoison() ||
ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
});
};
@@ -29040,7 +29055,7 @@ bool DAGCombiner::parallelizeChainedStores(StoreSDNode *St) {
return false;
// Do not handle stores to undef base pointers.
- if (BasePtr.getBase().isUndef())
+ if (BasePtr.getBase().isUndefOrPoison())
return false;
// Do not handle stores to opaque types
@@ -29152,7 +29167,7 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
return false;
// Do not handle stores to undef base pointers.
- if (BasePtr.getBase().isUndef())
+ if (BasePtr.getBase().isUndefOrPoison())
return false;
// Directly improve a chain of disjoint stores starting at St.
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index f61928a66eb3cff..2204e31cbe7d36d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -977,6 +977,22 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
TargetLowering::LegalizeAction Action = TargetLowering::Legal;
bool SimpleFinishLegalizing = true;
switch (Node->getOpcode()) {
+ // FIXME: If the node represents a poison value, replace it with an undef
+ // value.
+ // A poison value results from an erroneous operation but does not cause
+ // immediate undefined behavior, which allows it to be speculatively
+ // executed. Since most operations propagate poison, and poison is a
+ // stronger form of undef, it is always valid to replace poison with an
+ // undef value, which can take any legal value of the same type. This
+ // ensures that downstream computations do not rely on poison semantics.
+ // The poison information is lost once the replacement is made; in the
+ // future, if we need to retain it through legalization, this code will
+ // need to change accordingly.
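+ // For example (illustrative): a node such as "t3: i32 = POISON" is
+ // rewritten here into "t3: i32 = undef".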
+ case ISD::POISON: {
+ SDValue UndefNode = DAG.getUNDEF(Node->getValueType(0));
+ ReplaceNode(Node, UndefNode.getNode());
+ break;
+ }
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_VOID:
@@ -1548,7 +1564,8 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
// Store (in the right endianness) the elements to memory.
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
// Ignore undef elements.
- if (Node->getOperand(i).isUndef()) continue;
+ if (Node->getOperand(i).isUndefOrPoison())
+ continue;
unsigned Offset = TypeByteSize*i;
@@ -1869,7 +1886,7 @@ ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG,
NewIntermedVals;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue V = Node->getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
SDValue Vec;
@@ -1961,7 +1978,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
bool isConstant = true;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue V = Node->getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
if (i > 0)
isOnlyLowElement = false;
@@ -2004,7 +2021,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
CI->getZExtValue()));
}
} else {
- assert(Node->getOperand(i).isUndef());
+ assert(Node->getOperand(i).isUndefOrPoison());
Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
CV.push_back(UndefValue::get(OpNTy));
}
@@ -2021,7 +2038,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
SmallSet<SDValue, 16> DefinedValues;
for (unsigned i = 0; i < NumElems; ++i) {
- if (Node->getOperand(i).isUndef())
+ if (Node->getOperand(i).isUndefOrPoison())
continue;
DefinedValues.insert(Node->getOperand(i));
}
@@ -2031,7 +2048,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
SmallVector<int, 8> ShuffleVec(NumElems, -1);
for (unsigned i = 0; i < NumElems; ++i) {
SDValue V = Node->getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
ShuffleVec[i] = V == Value1 ? 0 : NumElems;
}
@@ -3136,6 +3153,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
for (unsigned i = 0; i < Node->getNumValues(); i++)
Results.push_back(Node->getOperand(i));
break;
+ case ISD::POISON:
case ISD::UNDEF: {
EVT VT = Node->getValueType(0);
if (VT.isInteger())
@@ -4002,7 +4020,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Node->getOperand(2));
} else {
// We test only the i1 bit. Skip the AND if UNDEF or another AND.
- if (Tmp2.isUndef() ||
+ if (Tmp2.isUndefOrPoison() ||
(Tmp2.getOpcode() == ISD::AND && isOneConstant(Tmp2.getOperand(1))))
Tmp3 = Tmp2;
else
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index a0f29496df77772..a1968631cc504dc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -118,6 +118,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::VP_SRL: Res = PromoteIntRes_SRL(N); break;
case ISD::VP_TRUNCATE:
case ISD::TRUNCATE: Res = PromoteIntRes_TRUNCATE(N); break;
+ case ISD::POISON:
case ISD::UNDEF: Res = PromoteIntRes_UNDEF(N); break;
case ISD::VAARG: Res = PromoteIntRes_VAARG(N); break;
case ISD::VSCALE: Res = PromoteIntRes_VSCALE(N); break;
@@ -2911,6 +2912,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
case ISD::SELECT: SplitRes_Select(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
+ case ISD::POISON:
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::FREEZE: SplitRes_FREEZE(N, Lo, Hi); break;
case ISD::SETCC: ExpandIntRes_SETCC(N, Lo, Hi); break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index da793a34879b8e9..e9b934655f67f6f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -71,6 +71,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT: R = ScalarizeVecRes_SELECT(N); break;
case ISD::SELECT_CC: R = ScalarizeVecRes_SELECT_CC(N); break;
case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
+ case ISD::POISON:
case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
case ISD::VECTOR_SHUFFLE: R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
case ISD::IS_FPCLASS: R = ScalarizeVecRes_IS_FPCLASS(N); break;
@@ -657,7 +658,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) {
SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
// Figure out if the scalar is the LHS or RHS and return it.
SDValue Arg = N->getOperand(2).getOperand(0);
- if (Arg.isUndef())
+ if (Arg.isUndefOrPoison())
return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
return GetScalarizedVector(N->getOperand(Op));
@@ -1118,6 +1119,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::VP_MERGE:
case ISD::VP_SELECT: SplitRes_Select(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
+ case ISD::POISON:
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::BITCAST: SplitVecRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_VECTOR: SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break;
@@ -2138,7 +2140,8 @@ void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo,
SDValue Ch = LD->getChain();
SDValue Ptr = LD->getBasePtr();
SDValue Offset = LD->getOffset();
- assert(Offset.isUndef() && "Unexpected indexed variable-length load offset");
+ assert(Offset.isUndefOrPoison() &&
+ "Unexpected indexed variable-length load offset");
Align Alignment = LD->getOriginalAlign();
SDValue Mask = LD->getMask();
SDValue EVL = LD->getVectorLength();
@@ -2212,7 +2215,7 @@ void DAGTypeLegalizer::SplitVecRes_VP_STRIDED_LOAD(VPStridedLoadSDNode *SLD,
SDValue &Lo, SDValue &Hi) {
assert(SLD->isUnindexed() &&
"Indexed VP strided load during type legalization!");
- assert(SLD->getOffset().isUndef() &&
+ assert(SLD->getOffset().isUndefOrPoison() &&
"Unexpected indexed variable-length load offset");
SDLoc DL(SLD);
@@ -2299,7 +2302,7 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
SDValue Ch = MLD->getChain();
SDValue Ptr = MLD->getBasePtr();
SDValue Offset = MLD->getOffset();
- assert(Offset.isUndef() && "Unexpected indexed masked load offset");
+ assert(Offset.isUndefOrPoison() && "Unexpected indexed masked load offset");
SDValue Mask = MLD->getMask();
SDValue PassThru = MLD->getPassThru();
Align Alignment = MLD->getOriginalAlign();
@@ -2526,7 +2529,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_COMPRESS(SDNode *N, SDValue &Lo,
MachinePointerInfo::getUnknownStack(MF));
SDValue Compressed = DAG.getLoad(VecVT, DL, Chain, StackPtr, PtrInfo);
- if (!Passthru.isUndef()) {
+ if (!Passthru.isUndefOrPoison()) {
Compressed =
DAG.getNode(ISD::VSELECT, DL, VecVT, Mask, Compressed, Passthru);
}
@@ -2808,7 +2811,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
if (Idx == PoisonMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
- if (Inputs[SrcRegIdx].isUndef()) {
+ if (Inputs[SrcRegIdx].isUndefOrPoison()) {
Idx = PoisonMaskElem;
continue;
}
@@ -2840,7 +2843,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
if (Idx == PoisonMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
- if (Inputs[SrcRegIdx].isUndef()) {
+ if (Inputs[SrcRegIdx].isUndefOrPoison()) {
Idx = PoisonMaskElem;
continue;
}
@@ -2848,7 +2851,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
getTypeAction(Inputs[SrcRegIdx].getValueType());
if (Inputs[SrcRegIdx].getOpcode() == ISD::CONCAT_VECTORS &&
Inputs[SrcRegIdx].getNumOperands() == 2 &&
- !Inputs[SrcRegIdx].getOperand(1).isUndef() &&
+ !Inputs[SrcRegIdx].getOperand(1).isUndefOrPoison() &&
(TypeAction == TargetLowering::TypeLegal ||
TypeAction == TargetLowering::TypeWidenVector))
UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2));
@@ -2906,11 +2909,11 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
if (Shuffle->getOperand(0).getValueType() != NewVT)
continue;
int Op = -1;
- if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
- !Shuffle->isSplat()) {
+ if (!Inputs[I].hasOneUse() &&
+ Shuffle->getOperand(1).isUndefOrPoison() && !Shuffle->isSplat()) {
Op = 0;
} else if (!Inputs[I].hasOneUse() &&
- !Shuffle->getOperand(1).isUndef()) {
+ !Shuffle->getOperand(1).isUndefOrPoison()) {
// Find the only used operand, if possible.
for (int &Idx : Mask) {
if (Idx == PoisonMaskElem)
@@ -2937,7 +2940,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
if (Op < 0) {
// Try to check if one of the shuffle operands is used already.
for (int OpIdx = 0; OpIdx < 2; ++OpIdx) {
- if (Shuffle->getOperand(OpIdx).isUndef())
+ if (Shuffle->getOperand(OpIdx).isUndefOrPoison())
continue;
auto *It = find(Inputs, Shuffle->getOperand(OpIdx));
if (It == std::end(Inputs))
@@ -2994,7 +2997,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
for (const auto &I : Inputs) {
if (IsConstant(I))
UniqueConstantInputs.insert(I);
- else if (!I.isUndef())
+ else if (!I.isUndefOrPoison())
UniqueInputs.insert(I);
}
// Adjust mask in case of reused inputs. Also, need to insert constant
@@ -3007,7 +3010,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
if (Idx == PoisonMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
- if (Inputs[SrcRegIdx].isUndef()) {
+ if (Inputs[SrcRegIdx].isUndefOrPoison()) {
Idx = PoisonMaskElem;
continue;
}
@@ -3792,7 +3795,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
SDValue Offset = N->getOffset();
- assert(Offset.isUndef() && "Unexpected VP store offset");
+ assert(Offset.isUndefOrPoison() && "Unexpected VP store offset");
SDValue Mask = N->getMask();
SDValue EVL = N->getVectorLength();
SDValue Data = N->getValue();
@@ -3869,7 +3872,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
SDValue DAGTypeLegalizer::SplitVecOp_VP_STRIDED_STORE(VPStridedStoreSDNode *N,
unsigned OpNo) {
assert(N->isUnindexed() && "Indexed vp_strided_store of a vector?");
- assert(N->getOffset().isUndef() && "Unexpected VP strided store offset");
+ assert(N->getOffset().isUndefOrPoison() &&
+ "Unexpected VP strided store offset");
SDLoc DL(N);
@@ -3946,7 +3950,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
SDValue Offset = N->getOffset();
- assert(Offset.isUndef() && "Unexpected indexed masked store offset");
+ assert(Offset.isUndefOrPoison() && "Unexpected indexed masked store offset");
SDValue Mask = N->getMask();
SDValue Data = N->getValue();
Align Alignment = N->getOriginalAlign();
@@ -4553,6 +4557,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT_CC: Res = WidenVecRes_SELECT_CC(N); break;
case ISD::VP_SETCC:
case ISD::SETCC: Res = WidenVecRes_SETCC(N); break;
+ case ISD::POISON:
case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
case ISD::VECTOR_SHUFFLE:
Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
@@ -5772,7 +5777,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
// The inputs and the result are widened to the same type.
unsigned i;
for (i=1; i < NumOperands; ++i)
- if (!N->getOperand(i).isUndef())
+ if (!N->getOperand(i).isUndefOrPoison())
break;
if (i == NumOperands)
@@ -6198,7 +6203,7 @@ static inline bool isSETCCorConvertedSETCC(SDValue N) {
N = N.getOperand(0);
else if (N.getOpcode() == ISD::CONCAT_VECTORS) {
for (unsigned i = 1; i < N->getNumOperands(); ++i)
- if (!N->getOperand(i)->isUndef())
+ if (!N->getOperand(i)->isUndefOrPoison())
return false;
N = N.getOperand(0);
}
@@ -7039,7 +7044,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
if (VT == TLI.getTypeToTransformTo(*DAG.getContext(), InVT)) {
unsigned i;
for (i = 1; i < NumOperands; ++i)
- if (!N->getOperand(i).isUndef())
+ if (!N->getOperand(i).isUndefOrPoison())
break;
if (i == NumOperands)
@@ -7100,7 +7105,8 @@ SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(SDNode *N) {
// We need to make sure that the indices are still valid, otherwise we might
// widen what was previously well-defined to something undefined.
- if (IndicesValid && InVec.isUndef() && N->getConstantOperandVal(2) == 0)
+ if (IndicesValid && InVec.isUndefOrPoison() &&
+ N->getConstantOperandVal(2) == 0)
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, InVec, SubVec,
N->getOperand(2));
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 16c3b295426c648..b3b1fd6a8c070ec 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -200,7 +200,7 @@ bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
unsigned i = 0, e = N->getNumOperands();
// Skip over all of the undef values.
- while (i != e && N->getOperand(i).isUndef())
+ while (i != e && N->getOperand(i).isUndefOrPoison())
++i;
// Do not accept an all-undef vector.
@@ -229,7 +229,7 @@ bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
// undefs. Even with the above element type twiddling, this should be OK, as
// the same type legalization should have applied to all the elements.
for (++i; i != e; ++i)
- if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
+ if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndefOrPoison())
return false;
return true;
}
@@ -248,7 +248,7 @@ bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
bool IsAllUndef = true;
for (const SDValue &Op : N->op_values()) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
IsAllUndef = false;
// Do not accept build_vectors that aren't all constants or which have non-0
@@ -289,7 +289,7 @@ bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
return false;
for (const SDValue &Op : N->op_values()) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
if (!isa<ConstantSDNode>(Op))
return false;
@@ -302,7 +302,7 @@ bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
return false;
for (const SDValue &Op : N->op_values()) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
if (!isa<ConstantFPSDNode>(Op))
return false;
@@ -332,7 +332,7 @@ bool ISD::isVectorShrinkable(const SDNode *N, unsigned NewEltSize,
return false;
for (const SDValue &Op : N->op_values()) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
if (!isa<ConstantSDNode>(Op))
return false;
@@ -353,11 +353,12 @@ bool ISD::allOperandsUndef(const SDNode *N) {
// is probably the desired behavior.
if (N->getNumOperands() == 0)
return false;
- return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
+ return all_of(N->op_values(),
+ [](SDValue Op) { return Op.isUndefOrPoison(); });
}
bool ISD::isFreezeUndef(const SDNode *N) {
- return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef();
+ return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndefOrPoison();
}
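
A sketch of what the widened ISD::isFreezeUndef predicate accepts after this hunk; DAG is assumed to be a live SelectionDAG (illustrative, not part of the patch):

  // freeze(undef) and freeze(poison) are both recognized, because the
  // operand test is now isUndefOrPoison() rather than isUndef().
  SDValue FU = DAG.getFreeze(DAG.getUNDEF(MVT::i64));
  SDValue FP = DAG.getFreeze(DAG.getPoison(MVT::i64));
  assert(ISD::isFreezeUndef(FU.getNode()) && ISD::isFreezeUndef(FP.getNode()));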
template <typename ConstNodeType>
@@ -375,7 +376,7 @@ bool ISD::matchUnaryPredicateImpl(SDValue Op,
EVT SVT = Op.getValueType().getScalarType();
for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
- if (AllowUndefs && Op.getOperand(i).isUndef()) {
+ if (AllowUndefs && Op.getOperand(i).isUndefOrPoison()) {
if (!Match(nullptr))
return false;
continue;
@@ -416,8 +417,8 @@ bool ISD::matchBinaryPredicate(
for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
SDValue LHSOp = LHS.getOperand(i);
SDValue RHSOp = RHS.getOperand(i);
- bool LHSUndef = AllowUndefs && LHSOp.isUndef();
- bool RHSUndef = AllowUndefs && RHSOp.isUndef();
+ bool LHSUndef = AllowUndefs && LHSOp.isUndefOrPoison();
+ bool RHSUndef = AllowUndefs && RHSOp.isUndefOrPoison();
auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
@@ -2153,7 +2154,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
"Invalid VECTOR_SHUFFLE");
// Canonicalize shuffle undef, undef -> undef
- if (N1.isUndef() && N2.isUndef())
+ if (N1.isUndefOrPoison() && N2.isUndefOrPoison())
return getUNDEF(VT);
// Validate that all indices in Mask are within the range of the elements
@@ -2174,7 +2175,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
}
// Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
commuteShuffle(N1, N2, MaskVec);
if (TLI->hasVectorBlend()) {
@@ -2210,7 +2211,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
// Canonicalize all index into lhs, -> shuffle lhs, undef
// Canonicalize all index into rhs, -> shuffle rhs, undef
bool AllLHS = true, AllRHS = true;
- bool N2Undef = N2.isUndef();
+ bool N2Undef = N2.isUndefOrPoison();
for (int i = 0; i != NElts; ++i) {
if (MaskVec[i] >= NElts) {
if (N2Undef)
@@ -2230,9 +2231,9 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
commuteShuffle(N1, N2, MaskVec);
}
// Reset our undef status after accounting for the mask.
- N2Undef = N2.isUndef();
+ N2Undef = N2.isUndefOrPoison();
// Re-check whether both sides ended up undef.
- if (N1.isUndef() && N2Undef)
+ if (N1.isUndefOrPoison() && N2Undef)
return getUNDEF(VT);
// If Identity shuffle return that node.
@@ -2258,7 +2259,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
BitVector UndefElements;
SDValue Splat = BV->getSplatValue(&UndefElements);
// If this is a splat of an undef, shuffling it is also undef.
- if (Splat && Splat.isUndef())
+ if (Splat && Splat.isUndefOrPoison())
return getUNDEF(VT);
bool SameNumElts =
@@ -2852,18 +2853,18 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
// predicate pass or fail, so we can return undef.
// Matches behavior in llvm::ConstantFoldCompareInstruction.
// icmp eq/ne X, undef -> undef.
- if ((N1.isUndef() || N2.isUndef()) &&
+ if ((N1.isUndefOrPoison() || N2.isUndefOrPoison()) &&
(Cond == ISD::SETEQ || Cond == ISD::SETNE))
return GetUndefBooleanConstant();
// If both operands are undef, we can return undef for int comparison.
// icmp undef, undef -> undef.
- if (N1.isUndef() && N2.isUndef())
+ if (N1.isUndefOrPoison() && N2.isUndefOrPoison())
return GetUndefBooleanConstant();
// icmp X, X -> true/false
// icmp X, undef -> true/false because undef could be X.
- if (N1.isUndef() || N2.isUndef() || N1 == N2)
+ if (N1.isUndefOrPoison() || N2.isUndefOrPoison() || N1 == N2)
return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
}
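
These setcc folds deliberately keep the long-standing undef behavior for poison operands rather than propagating poison. A sketch of the effect, assuming a SelectionDAG DAG and an SDLoc DL (illustrative names):

  // seteq X, poison now folds to the same boolean-undef constant that
  // seteq X, undef has always produced in FoldSetCC.
  SDValue X = DAG.getConstant(7, DL, MVT::i32);
  SDValue P = DAG.getPoison(MVT::i32);
  SDValue F = DAG.getSetCC(DL, MVT::i1, X, P, ISD::SETEQ);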
@@ -2936,14 +2937,15 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
OpVT);
}
- } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
+ } else if (N1CFP && OpVT.isSimple() && !N2.isUndefOrPoison()) {
// Ensure that the constant occurs on the RHS.
ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
return SDValue();
return getSetCC(dl, VT, N2, N1, SwappedCond);
} else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
- (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
+ (OpVT.isFloatingPoint() &&
+ (N1.isUndefOrPoison() || N2.isUndefOrPoison()))) {
// If an operand is known to be a nan (or undef that could be a nan), we can
// fold it.
// Choosing NaN for the undef will always make unordered comparison succeed
@@ -3045,7 +3047,7 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
// vector types.
switch (Opcode) {
case ISD::SPLAT_VECTOR:
- UndefElts = V.getOperand(0).isUndef()
+ UndefElts = V.getOperand(0).isUndefOrPoison()
? APInt::getAllOnes(DemandedElts.getBitWidth())
: APInt(DemandedElts.getBitWidth(), 0);
return true;
@@ -3091,7 +3093,7 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
SDValue Scl;
for (unsigned i = 0; i != NumElts; ++i) {
SDValue Op = V.getOperand(i);
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
UndefElts.setBit(i);
continue;
}
@@ -5465,6 +5467,9 @@ bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
case ISD::CopyFromReg:
return true;
+ case ISD::POISON:
+ return false;
+
case ISD::UNDEF:
return PoisonOnly;
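
This hunk is the semantic core of the patch: a POISON node can never be proven poison-free, while an UNDEF node only fails the stricter undef query. A sketch, assuming a live SelectionDAG DAG (illustrative):

  SDValue P = DAG.getPoison(MVT::i32);
  SDValue U = DAG.getUNDEF(MVT::i32);
  // Poison can never be shown poison-free.
  assert(!DAG.isGuaranteedNotToBeUndefOrPoison(P, /*PoisonOnly=*/true));
  // Undef is not poison, so it passes the poison-only query...
  assert(DAG.isGuaranteedNotToBeUndefOrPoison(U, /*PoisonOnly=*/true));
  // ...but it is, of course, still undef.
  assert(!DAG.isGuaranteedNotToBeUndefOrPoison(U, /*PoisonOnly=*/false));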
@@ -6068,7 +6073,7 @@ static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
"Incorrect element count in BUILD_VECTOR!");
// BUILD_VECTOR of UNDEFs is UNDEF.
- if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
+ if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndefOrPoison(); }))
return DAG.getUNDEF(VT);
// BUILD_VECTOR of seq extract/insert from the same vector + type is Identity.
@@ -6110,7 +6115,7 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
return Ops[0];
// Concat of UNDEFs is UNDEF.
- if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
+ if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndefOrPoison(); }))
return DAG.getUNDEF(VT);
// Scan the operands and look for extract operations from a single source
@@ -6149,7 +6154,7 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
SmallVector<SDValue, 16> Elts;
for (SDValue Op : Ops) {
EVT OpVT = Op.getValueType();
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
else if (Op.getOpcode() == ISD::BUILD_VECTOR)
Elts.append(Op->op_begin(), Op->op_end());
@@ -6164,7 +6169,7 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
if (SVT.bitsGT(VT.getScalarType())) {
for (SDValue &Op : Elts) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
Op = DAG.getUNDEF(SVT);
else
Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
@@ -6283,18 +6288,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
N1.getValueType().getVectorElementCount()) &&
"Vector element count mismatch!");
assert(N1.getValueType().bitsLT(VT) && "Invalid fpext node, dst < src!");
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
break;
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
// [us]itofp(undef) = 0, because the result value is bounded.
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return getConstantFP(0.0, DL, VT);
break;
case ISD::SIGN_EXTEND:
@@ -6314,9 +6319,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
Flags.setNonNeg(N1->getFlags().hasNonNeg());
return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
}
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
// sext(undef) = 0, because the top bits will all be the same.
return getConstant(0, DL, VT);
+
break;
case ISD::ZERO_EXTEND:
assert(VT.isInteger() && N1.getValueType().isInteger() &&
@@ -6334,7 +6340,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
Flags.setNonNeg(N1->getFlags().hasNonNeg());
return getNode(ISD::ZERO_EXTEND, DL, VT, N1.getOperand(0), Flags);
}
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
// zext(undef) = 0, because the top bits will be zero.
return getConstant(0, DL, VT);
@@ -6376,7 +6382,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
}
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
// (ext (trunc x)) -> x
@@ -6411,7 +6417,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
return N1.getOperand(0);
}
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes)
return getVScale(DL, VT,
@@ -6429,14 +6435,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
break;
case ISD::ABS:
assert(VT.isInteger() && VT == N1.getValueType() && "Invalid ABS!");
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
return getConstant(0, DL, VT);
break;
case ISD::BSWAP:
assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BSWAP!");
assert((VT.getScalarSizeInBits() % 16 == 0) &&
"BSWAP types must be a multiple of 16 bits!");
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
// bswap(bswap(X)) -> X.
if (OpOpcode == ISD::BSWAP)
@@ -6444,7 +6450,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
break;
case ISD::BITREVERSE:
assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BITREVERSE!");
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
break;
case ISD::BITCAST:
@@ -6453,7 +6459,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
if (VT == N1.getValueType()) return N1; // noop conversion.
if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
return getNode(ISD::BITCAST, DL, VT, N1.getOperand(0));
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
break;
case ISD::SCALAR_TO_VECTOR:
@@ -6463,7 +6469,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
N1.getValueType().isInteger() &&
VT.getVectorElementType().bitsLE(N1.getValueType()))) &&
"Illegal SCALAR_TO_VECTOR node!");
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
// scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
@@ -6474,7 +6480,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
break;
case ISD::FNEG:
// Negation of an unknown bag of bits is still completely undefined.
- if (OpOpcode == ISD::UNDEF)
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
if (OpOpcode == ISD::FNEG) // --X -> X
@@ -6655,13 +6661,13 @@ bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
// zero/undef, the whole op is undef.
assert(Ops.size() == 2 && "Div/rem should have 2 operands");
SDValue Divisor = Ops[1];
- if (Divisor.isUndef() || isNullConstant(Divisor))
+ if (Divisor.isUndefOrPoison() || isNullConstant(Divisor))
return true;
return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
- llvm::any_of(Divisor->op_values(),
- [](SDValue V) { return V.isUndef() ||
- isNullConstant(V); });
+ llvm::any_of(Divisor->op_values(), [](SDValue V) {
+ return V.isUndefOrPoison() || isNullConstant(V);
+ });
// TODO: Handle signed overflow.
}
// TODO: Handle oversized shifts.
@@ -6903,7 +6909,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
llvm::EVT OpVT = Ops[0].getOperand(0).getValueType();
for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I) {
SDValue Op = Ops[0].getOperand(I);
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
ScalarOps.push_back(getUNDEF(OpVT));
continue;
}
@@ -7000,7 +7006,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
};
auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
- return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
+ return Op.isUndefOrPoison() || Op.getOpcode() == ISD::CONDCODE ||
Op.getOpcode() == ISD::BUILD_VECTOR ||
Op.getOpcode() == ISD::SPLAT_VECTOR;
};
@@ -7043,7 +7049,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
EVT InSVT = Op.getValueType().getScalarType();
if (Op.getOpcode() != ISD::BUILD_VECTOR &&
Op.getOpcode() != ISD::SPLAT_VECTOR) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
ScalarOps.push_back(getUNDEF(InSVT));
else
ScalarOps.push_back(Op);
@@ -7061,7 +7067,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
// - if we fail to constant fold we can't guarantee the (dead) nodes
// we're creating will be cleaned up before being visited for
// legalization.
- if (NewNodesMustHaveLegalTypes && !ScalarOp.isUndef() &&
+ if (NewNodesMustHaveLegalTypes && !ScalarOp.isUndefOrPoison() &&
!isa<ConstantSDNode>(ScalarOp) &&
TLI->getTypeAction(*getContext(), InSVT) !=
TargetLowering::TypeLegal)
@@ -7076,7 +7082,8 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
// Scalar folding only succeeded if the result is a constant or UNDEF.
- if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
+ if (!ScalarResult.isUndefOrPoison() &&
+ ScalarResult.getOpcode() != ISD::Constant &&
ScalarResult.getOpcode() != ISD::ConstantFP)
return SDValue();
@@ -7160,7 +7167,7 @@ SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
case ISD::FSUB:
// -0.0 - undef --> undef (consistent with "fneg undef")
if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
- if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
+ if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndefOrPoison())
return getUNDEF(VT);
[[fallthrough]];
@@ -7170,9 +7177,9 @@ SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
case ISD::FREM:
// If both operands are undef, the result is undef. If 1 operand is undef,
// the result is NaN. This should match the behavior of the IR optimizer.
- if (N1.isUndef() && N2.isUndef())
+ if (N1.isUndefOrPoison() && N2.isUndefOrPoison())
return getUNDEF(VT);
- if (N1.isUndef() || N2.isUndef())
+ if (N1.isUndefOrPoison() || N2.isUndefOrPoison())
return getConstantFP(APFloat::getNaN(VT.getFltSemantics()), DL, VT);
}
return SDValue();
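
A sketch of the extended FP folds, assuming a SelectionDAG DAG, an SDLoc DL, and that getNode() reaches foldConstantFPMath as it does on current trunk (illustrative):

  SDValue P = DAG.getPoison(MVT::f32);
  SDValue C = DAG.getConstantFP(1.0, DL, MVT::f32);
  // fadd poison, poison -> undef; fadd poison, 1.0 -> NaN, mirroring the
  // IR-level constant folder.
  SDValue Both = DAG.getNode(ISD::FADD, DL, MVT::f32, P, P);
  SDValue One = DAG.getNode(ISD::FADD, DL, MVT::f32, P, C);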
@@ -7488,7 +7495,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
element type of the vector.");
// Extract from an undefined value or using an undefined index is undefined.
- if (N1.isUndef() || N2.isUndef())
+ if (N1.isUndefOrPoison() || N2.isUndefOrPoison())
return getUNDEF(VT);
// EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
@@ -7615,7 +7622,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
return N1;
// EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
return getUNDEF(VT);
// EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
@@ -7640,7 +7647,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
return SV;
// Canonicalize an UNDEF to the RHS, even over a constant.
- if (N1.isUndef()) {
+ if (N1.isUndefOrPoison()) {
if (TLI->isCommutativeBinOp(Opcode)) {
std::swap(N1, N2);
} else {
@@ -7660,10 +7667,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
}
// Fold a bunch of operators when the RHS is undef.
- if (N2.isUndef()) {
+ if (N2.isUndefOrPoison()) {
switch (Opcode) {
case ISD::XOR:
- if (N1.isUndef())
+ if (N1.isUndefOrPoison())
// Handle undef ^ undef -> 0 special case. This is a common
// idiom (misuse).
return getConstant(0, DL, VT);
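
Concretely, the special case above now fires for poison operands as well; a sketch assuming DAG and DL (illustrative):

  SDValue P = DAG.getPoison(MVT::i32);
  // xor undef, undef -> 0, and with this patch xor poison, poison -> 0 too.
  SDValue Zero = DAG.getNode(ISD::XOR, DL, MVT::i32, P, P);
  assert(isNullConstant(Zero));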
@@ -7808,18 +7815,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
return getUNDEF(VT);
// Undefined index can be assumed out-of-bounds, so that's UNDEF too.
- if (N3.isUndef())
+ if (N3.isUndefOrPoison())
return getUNDEF(VT);
// If the inserted element is an UNDEF, just use the input vector.
- if (N2.isUndef())
+ if (N2.isUndefOrPoison())
return N1;
break;
}
case ISD::INSERT_SUBVECTOR: {
// Inserting undef into undef is still undef.
- if (N1.isUndef() && N2.isUndef())
+ if (N1.isUndefOrPoison() && N2.isUndefOrPoison())
return getUNDEF(VT);
EVT N2VT = N2.getValueType();
@@ -7850,7 +7857,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
// If this is an insert of an extracted vector into an undef vector, we
// can just use the input to the extract.
- if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+ if (N1.isUndefOrPoison() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
return N2.getOperand(0);
break;
@@ -7878,7 +7885,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
assert(VecVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
"Vector and mask must have same number of elements.");
- if (N1.isUndef() || N2.isUndef())
+ if (N1.isUndefOrPoison() || N2.isUndefOrPoison())
return N3;
break;
@@ -7969,7 +7976,7 @@ SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
const SDLoc &dl) {
- assert(!Value.isUndef());
+ assert(!Value.isUndefOrPoison());
unsigned NumBits = VT.getScalarSizeInBits();
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
@@ -8134,7 +8141,7 @@ static SDValue getMemcpyLoadsAndStores(
const AAMDNodes &AAInfo, BatchAAResults *BatchAA) {
// Turn a memcpy of undef to nop.
// FIXME: We need to honor volatile even if Src is undef.
- if (Src.isUndef())
+ if (Src.isUndefOrPoison())
return Chain;
// Expand memcpy to a series of load and store ops if the size operand falls
@@ -8337,7 +8344,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
const AAMDNodes &AAInfo) {
// Turn a memmove of undef to nop.
// FIXME: We need to honor volatile even if Src is undef.
- if (Src.isUndef())
+ if (Src.isUndefOrPoison())
return Chain;
// Expand memmove to a series of load and store ops if the size operand falls
@@ -8460,7 +8467,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
const AAMDNodes &AAInfo) {
// Turn a memset of undef to nop.
// FIXME: We need to honor volatile even if Src is undef.
- if (Src.isUndef())
+ if (Src.isUndefOrPoison())
return Chain;
// Expand memset to a series of load/store ops if the size operand
@@ -9189,7 +9196,7 @@ static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
// If the 'Offset' value isn't a constant, we can't handle this.
if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
- if (OffsetOp.isUndef())
+ if (OffsetOp.isUndefOrPoison())
return InferPointerInfo(Info, DAG, Ptr);
return Info;
}
@@ -9240,10 +9247,16 @@ SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
}
bool Indexed = AM != ISD::UNINDEXED;
- assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
+ assert((Indexed || Offset.isUndefOrPoison()) &&
+ "Unindexed load with an offset!");
SDVTList VTs = Indexed ?
getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
+
+ // Lower poison to undef.
+ if (Ptr.getNode()->isPoison())
+ Ptr = getUNDEF(Ptr.getValueType());
+
SDValue Ops[] = { Chain, Ptr, Offset };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
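
Note the defensive step above: a poison base pointer is rewritten to undef before the load node is created. A sketch of the observable effect; DAG, DL, the chain Ch, and the MachinePointerInfo PI are illustrative assumptions:

  SDValue PoisonPtr = DAG.getPoison(MVT::i64); // assumes 64-bit pointers
  SDValue L = DAG.getLoad(MVT::i32, DL, Ch, PoisonPtr, PI);
  // The resulting LoadSDNode carries an UNDEF base pointer, not POISON.
  assert(cast<LoadSDNode>(L.getNode())->getBasePtr().isUndef());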
@@ -9308,7 +9321,8 @@ SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
SDValue Base, SDValue Offset,
ISD::MemIndexedMode AM) {
LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
- assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
+ assert(LD->getOffset().isUndefOrPoison() &&
+ "Load is already a indexed load!");
// Don't propagate the invariant or dereferenceable flags.
auto MMOFlags =
LD->getMemOperand()->getFlags() &
@@ -9440,7 +9454,8 @@ SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
SDValue Base, SDValue Offset,
ISD::MemIndexedMode AM) {
StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
- assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
+ assert(ST->getOffset().isUndefOrPoison() &&
+ "Store is already a indexed store!");
SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
FoldingSetNodeID ID;
@@ -9495,7 +9510,8 @@ SDValue SelectionDAG::getLoadVP(ISD::MemIndexedMode AM,
EVT MemVT, MachineMemOperand *MMO,
bool IsExpanding) {
bool Indexed = AM != ISD::UNINDEXED;
- assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
+ assert((Indexed || Offset.isUndefOrPoison()) &&
+ "Unindexed load with an offset!");
SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
: getVTList(VT, MVT::Other);
@@ -9570,7 +9586,8 @@ SDValue SelectionDAG::getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl,
SDValue Base, SDValue Offset,
ISD::MemIndexedMode AM) {
auto *LD = cast<VPLoadSDNode>(OrigLoad);
- assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
+ assert(LD->getOffset().isUndefOrPoison() &&
+ "Load is already a indexed load!");
// Don't propagate the invariant or dereferenceable flags.
auto MMOFlags =
LD->getMemOperand()->getFlags() &
@@ -9589,7 +9606,8 @@ SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
bool IsCompressing) {
assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
bool Indexed = AM != ISD::UNINDEXED;
- assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
+ assert((Indexed || Offset.isUndefOrPoison()) &&
+ "Unindexed vp_store with an offset!");
SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
: getVTList(MVT::Other);
SDValue Ops[] = {Chain, Val, Ptr, Offset, Mask, EVL};
@@ -9692,7 +9710,8 @@ SDValue SelectionDAG::getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl,
SDValue Base, SDValue Offset,
ISD::MemIndexedMode AM) {
auto *ST = cast<VPStoreSDNode>(OrigStore);
- assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
+ assert(ST->getOffset().isUndefOrPoison() &&
+ "Store is already an indexed store!");
SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
Offset, ST->getMask(), ST->getVectorLength()};
@@ -9723,7 +9742,8 @@ SDValue SelectionDAG::getStridedLoadVP(
SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding) {
bool Indexed = AM != ISD::UNINDEXED;
- assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
+ assert((Indexed || Offset.isUndefOrPoison()) &&
+ "Unindexed load with an offset!");
SDValue Ops[] = {Chain, Ptr, Offset, Stride, Mask, EVL};
SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
@@ -9780,7 +9800,8 @@ SDValue SelectionDAG::getStridedStoreVP(SDValue Chain, const SDLoc &DL,
bool IsTruncating, bool IsCompressing) {
assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
bool Indexed = AM != ISD::UNINDEXED;
- assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
+ assert((Indexed || Offset.isUndefOrPoison()) &&
+ "Unindexed vp_store with an offset!");
SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
: getVTList(MVT::Other);
SDValue Ops[] = {Chain, Val, Ptr, Offset, Stride, Mask, EVL};
@@ -9950,7 +9971,7 @@ SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
ISD::MemIndexedMode AM,
ISD::LoadExtType ExtTy, bool isExpanding) {
bool Indexed = AM != ISD::UNINDEXED;
- assert((Indexed || Offset.isUndef()) &&
+ assert((Indexed || Offset.isUndefOrPoison()) &&
"Unindexed masked load with an offset!");
SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
: getVTList(VT, MVT::Other);
@@ -9982,7 +10003,8 @@ SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
SDValue Base, SDValue Offset,
ISD::MemIndexedMode AM) {
MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
- assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!");
+ assert(LD->getOffset().isUndefOrPoison() &&
+ "Masked load is already a indexed load!");
return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
Offset, LD->getMask(), LD->getPassThru(),
LD->getMemoryVT(), LD->getMemOperand(), AM,
@@ -9998,7 +10020,7 @@ SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
bool Indexed = AM != ISD::UNINDEXED;
- assert((Indexed || Offset.isUndef()) &&
+ assert((Indexed || Offset.isUndefOrPoison()) &&
"Unindexed masked store with an offset!");
SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
: getVTList(MVT::Other);
@@ -10031,7 +10053,7 @@ SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
SDValue Base, SDValue Offset,
ISD::MemIndexedMode AM) {
MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
- assert(ST->getOffset().isUndef() &&
+ assert(ST->getOffset().isUndefOrPoison() &&
"Masked store is already a indexed store!");
return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
@@ -10227,11 +10249,11 @@ SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
// select undef, T, F --> T (if T is a constant), otherwise F
// select ?, undef, F --> F
// select ?, T, undef --> T
- if (Cond.isUndef())
+ if (Cond.isUndefOrPoison())
return isConstantValueOfAnyType(T) ? T : F;
- if (T.isUndef())
+ if (T.isUndefOrPoison())
return F;
- if (F.isUndef())
+ if (F.isUndefOrPoison())
return T;
// select true, T, F --> T
@@ -10248,10 +10270,10 @@ SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
// shift undef, Y --> 0 (can always assume that the undef value is 0)
- if (X.isUndef())
+ if (X.isUndefOrPoison())
return getConstant(0, SDLoc(X.getNode()), X.getValueType());
// shift X, undef --> undef (because it may shift by the bitwidth)
- if (Y.isUndef())
+ if (Y.isUndefOrPoison())
return getUNDEF(X.getValueType());
// shift 0, Y --> 0
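
Taken together, the two simplifications above give poison shift operands the same treatment as undef ones; a sketch assuming DAG and DL (illustrative):

  SDValue P = DAG.getPoison(MVT::i32);
  SDValue Three = DAG.getConstant(3, DL, MVT::i32);
  // shl poison, 3 --> 0 (a poison shiftee may be assumed zero) ...
  assert(isNullConstant(DAG.getNode(ISD::SHL, DL, MVT::i32, P, Three)));
  // ... while shl 3, poison --> undef (the amount may exceed the bitwidth).
  assert(DAG.getNode(ISD::SHL, DL, MVT::i32, Three, P).isUndef());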
@@ -10286,10 +10308,12 @@ SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
(YC && YC->getValueAPF().isInfinity());
- if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
+ if (Flags.hasNoNaNs() &&
+ (HasNan || X.isUndefOrPoison() || Y.isUndefOrPoison()))
return getUNDEF(X.getValueType());
- if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
+ if (Flags.hasNoInfs() &&
+ (HasInf || X.isUndefOrPoison() || Y.isUndefOrPoison()))
return getUNDEF(X.getValueType());
if (!YC)
@@ -12208,7 +12232,7 @@ bool llvm::isNullConstant(SDValue V) {
}
bool llvm::isNullConstantOrUndef(SDValue V) {
- return V.isUndef() || isNullConstant(V);
+ return V.isUndefOrPoison() || isNullConstant(V);
}
bool llvm::isNullFPConstant(SDValue V) {
@@ -13106,7 +13130,7 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
SDValue OpVal = getOperand(i);
unsigned BitPos = j * EltWidth;
- if (OpVal.isUndef())
+ if (OpVal.isUndefOrPoison())
SplatUndef.setBits(BitPos, BitPos + EltWidth);
else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
@@ -13168,7 +13192,7 @@ SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
if (!DemandedElts[i])
continue;
SDValue Op = getOperand(i);
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
if (UndefElements)
(*UndefElements)[i] = true;
} else if (!Splatted) {
@@ -13180,7 +13204,7 @@ SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
if (!Splatted) {
unsigned FirstDemandedIdx = DemandedElts.countr_zero();
- assert(getOperand(FirstDemandedIdx).isUndef() &&
+ assert(getOperand(FirstDemandedIdx).isUndefOrPoison() &&
"Can only have a splat without a constant for all undefs.");
return getOperand(FirstDemandedIdx);
}
@@ -13209,7 +13233,7 @@ bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
// Set the undefs even if we don't find a sequence (like getSplatValue).
if (UndefElements)
for (unsigned I = 0; I != NumOps; ++I)
- if (DemandedElts[I] && getOperand(I).isUndef())
+ if (DemandedElts[I] && getOperand(I).isUndefOrPoison())
(*UndefElements)[I] = true;
// Iteratively widen the sequence length looking for repetitions.
@@ -13220,12 +13244,12 @@ bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
continue;
SDValue &SeqOp = Sequence[I % SeqLen];
SDValue Op = getOperand(I);
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
if (!SeqOp)
SeqOp = Op;
continue;
}
- if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
+ if (SeqOp && !SeqOp.isUndefOrPoison() && SeqOp != Op) {
Sequence.clear();
break;
}
@@ -13306,7 +13330,7 @@ bool BuildVectorSDNode::getConstantRawBits(
for (unsigned I = 0; I != NumSrcOps; ++I) {
SDValue Op = getOperand(I);
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
SrcUndeElements.set(I);
continue;
}
@@ -13380,7 +13404,7 @@ void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
bool BuildVectorSDNode::isConstant() const {
for (const SDValue &Op : op_values()) {
unsigned Opc = Op.getOpcode();
- if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
+ if (!Op.isUndefOrPoison() && Opc != ISD::Constant && Opc != ISD::ConstantFP)
return false;
}
return true;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 4a9ac8580e4e2e0..f586725082be8ff 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1819,7 +1819,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
- return DAG.getUNDEF(VT);
+ return isa<PoisonValue>(C) ? DAG.getPoison(VT) : DAG.getUNDEF(VT);
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
visit(CE->getOpcode(), *CE);
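
This is the point where IR constants become DAG nodes, and it works because PoisonValue derives from UndefValue in the IR class hierarchy, so the existing isa<UndefValue>(C) guard already admits poison; the new ternary merely refines the result. A hedged sketch of that mapping (the helper name is hypothetical):

  // Maps IR undef-like constants to DAG opcodes after this change:
  //   undef -> ISD::UNDEF, poison -> ISD::POISON.
  static SDValue lowerUndefLike(SelectionDAG &DAG, const Constant *C, EVT VT) {
    assert(isa<UndefValue>(C) && "caller filters on isa<UndefValue>");
    return isa<PoisonValue>(C) ? DAG.getPoison(VT) : DAG.getUNDEF(VT);
  }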
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 7b1a2d640a2bd4e..98e1c687b6b223d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -188,6 +188,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::CopyToReg: return "CopyToReg";
case ISD::CopyFromReg: return "CopyFromReg";
case ISD::UNDEF: return "undef";
+ case ISD::POISON: return "poison";
case ISD::VSCALE: return "vscale";
case ISD::MERGE_VALUES: return "merge_values";
case ISD::INLINEASM: return "inlineasm";
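
With the dumper entry above, debug output gains a distinct spelling for the new opcode; where a DAG dump could previously only show lines like `t1: i32 = undef`, an ISD::POISON node should now render as:

  t1: i32 = poison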
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 66db2ae993de872..3bbe6917eca577f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -3276,6 +3276,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
case ISD::WRITE_REGISTER:
Select_WRITE_REGISTER(NodeToMatch);
return;
+ case ISD::POISON:
case ISD::UNDEF:
Select_UNDEF(NodeToMatch);
return;
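
Instruction selection needs no new machinery here: ISD::POISON falls through to Select_UNDEF, which on current trunk lowers the node to a target-independent IMPLICIT_DEF, roughly:

  void SelectionDAGISel::Select_UNDEF(SDNode *N) {
    CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
  }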
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 987ea826f782e79..9255d259a534fd0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -257,7 +257,7 @@ static bool willLowerDirectly(SDValue Incoming) {
if (Incoming.getValueType().getSizeInBits() > 64)
return false;
- return isIntOrFPConstant(Incoming) || Incoming.isUndef();
+ return isIntOrFPConstant(Incoming) || Incoming.isUndefOrPoison();
}
/// Try to find existing copies of the incoming values in stack slots used for
@@ -443,8 +443,8 @@ lowerIncomingStatepointValue(SDValue Incoming, bool RequireSpillSlot,
}
assert(Incoming.getValueType().getSizeInBits() <= 64);
-
- if (Incoming.isUndef()) {
+
+ if (Incoming.isUndefOrPoison()) {
// Put an easily recognized constant that's unlikely to be a valid
// value so that uses of undef by the consumer of the stackmap are
// easily recognized. This is legal since the compiler is always
@@ -1287,7 +1287,7 @@ void SelectionDAGBuilder::visitGCRelocate(const GCRelocateInst &Relocate) {
assert(Record.type == RecordType::NoRelocate);
SDValue SD = getValue(DerivedPtr);
- if (SD.isUndef() && SD.getValueType().getSizeInBits() <= 64) {
+ if (SD.isUndefOrPoison() && SD.getValueType().getSizeInBits() <= 64) {
// Lowering relocate(undef) as an arbitrary constant. The constant value
// is chosen such that it's unlikely to be a valid pointer.
setValue(&Relocate, DAG.getConstant(0xFEFEFEFE, SDLoc(SD), MVT::i64));
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index adfb96041c5c06b..2edb35b7a6ee5bb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -682,7 +682,7 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
return SDValue();
// Ignore UNDEFs.
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
return SDValue();
// Not demanding any bits/elts from Op.
@@ -1153,7 +1153,7 @@ bool TargetLowering::SimplifyDemandedBits(
SDLoc dl(Op);
// Undef operand.
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
return false;
// We can't simplify target constants.
@@ -1455,7 +1455,7 @@ bool TargetLowering::SimplifyDemandedBits(
// AND(INSERT_SUBVECTOR(C,X,I),M) -> INSERT_SUBVECTOR(AND(C,M),X,I)
// iff 'C' is Undef/Constant and AND(X,M) == X (for DemandedBits).
if (Op0.getOpcode() == ISD::INSERT_SUBVECTOR && !VT.isScalableVector() &&
- (Op0.getOperand(0).isUndef() ||
+ (Op0.getOperand(0).isUndefOrPoison() ||
ISD::isBuildVectorOfConstantSDNodes(Op0.getOperand(0).getNode())) &&
Op0->hasOneUse()) {
unsigned NumSubElts =
@@ -3050,7 +3050,8 @@ static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
// nodes. Ignore opaque integers because they do not constant fold.
SDValue Elt = BV->getOperand(Index);
auto *C = dyn_cast<ConstantSDNode>(Elt);
- if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))
+ if (isa<ConstantFPSDNode>(Elt) || Elt.isUndefOrPoison() ||
+ (C && !C->isOpaque()))
return Elt;
}
@@ -3068,7 +3069,8 @@ static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0);
SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1);
if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT)
- if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef())
+ if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1)
+ .isUndefOrPoison())
KnownUndef.setBit(i);
}
return KnownUndef;
@@ -3097,7 +3099,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
"Mask size mismatches value type element count!");
// Undef operand.
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
KnownUndef.setAllBits();
return false;
}
@@ -3283,7 +3285,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
SmallVector<SDValue, 32> Ops(Op->ops());
bool Updated = false;
for (unsigned i = 0; i != NumElts; ++i) {
- if (!DemandedElts[i] && !Ops[i].isUndef()) {
+ if (!DemandedElts[i] && !Ops[i].isUndefOrPoison()) {
Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
KnownUndef.setBit(i);
Updated = true;
@@ -3295,7 +3297,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
}
for (unsigned i = 0; i != NumElts; ++i) {
SDValue SrcOp = Op.getOperand(i);
- if (SrcOp.isUndef()) {
+ if (SrcOp.isUndefOrPoison()) {
KnownUndef.setBit(i);
} else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
(isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
@@ -3356,7 +3358,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
return true;
// If none of the src operand elements are demanded, replace it with undef.
- if (!DemandedSrcElts && !Src.isUndef())
+ if (!DemandedSrcElts && !Src.isUndefOrPoison())
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
TLO.DAG.getUNDEF(VT), Sub,
Op.getOperand(2)));
@@ -3429,7 +3431,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
KnownZero, TLO, Depth + 1))
return true;
- KnownUndef.setBitVal(Idx, Scl.isUndef());
+ KnownUndef.setBitVal(Idx, Scl.isUndefOrPoison());
KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl));
break;
@@ -7359,7 +7361,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
case ISD::BUILD_VECTOR: {
// Only permit BUILD_VECTOR of constants.
if (llvm::any_of(Op->op_values(), [&](SDValue N) {
- return !N.isUndef() && !isa<ConstantFPSDNode>(N);
+ return !N.isUndefOrPoison() && !isa<ConstantFPSDNode>(N);
}))
break;
@@ -7367,7 +7369,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
(isOperationLegal(ISD::ConstantFP, VT) &&
isOperationLegal(ISD::BUILD_VECTOR, VT)) ||
llvm::all_of(Op->op_values(), [&](SDValue N) {
- return N.isUndef() ||
+ return N.isUndefOrPoison() ||
isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
OptForSize);
});
@@ -7377,7 +7379,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
SmallVector<SDValue, 4> Ops;
for (SDValue C : Op->op_values()) {
- if (C.isUndef()) {
+ if (C.isUndefOrPoison()) {
Ops.push_back(C);
continue;
}
@@ -11812,7 +11814,7 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,
SDValue Chain = DAG.getEntryNode();
SDValue OutPos = DAG.getConstant(0, DL, PositionVT);
- bool HasPassthru = !Passthru.isUndef();
+ bool HasPassthru = !Passthru.isUndefOrPoison();
// If we have a passthru vector, store it on the stack, overwrite the matching
// positions and then re-write the last element that was potentially
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 34464d317beafe4..46264a18c39fcd8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6665,7 +6665,7 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
// SVE supports zero (and so undef) passthrough values only, everything else
// must be handled manually by an explicit select on the load's output.
- if (!PassThru->isUndef() && !isZerosVector(PassThru.getNode())) {
+ if (!PassThru->isUndefOrPoison() && !isZerosVector(PassThru.getNode())) {
SDValue Ops[] = {Chain, DAG.getUNDEF(VT), Mask, BasePtr, Index, Scale};
SDValue Load =
DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
@@ -6724,8 +6724,9 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
Index = convertToScalableVector(DAG, ContainerVT, Index);
Mask = convertFixedMaskToScalableVector(Mask, DAG);
- PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT)
- : DAG.getConstant(0, DL, ContainerVT);
+ PassThru = PassThru->isUndefOrPoison()
+ ? DAG.getUNDEF(ContainerVT)
+ : DAG.getConstant(0, DL, ContainerVT);
// Emit equivalent scalable vector gather.
SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
@@ -6839,7 +6840,7 @@ SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue PassThru = LoadNode->getPassThru();
SDValue Mask = LoadNode->getMask();
- if (PassThru->isUndef() || isZerosVector(PassThru.getNode()))
+ if (PassThru->isUndefOrPoison() || isZerosVector(PassThru.getNode()))
return Op;
SDValue Load = DAG.getMaskedLoad(
@@ -7096,7 +7097,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
EVT MaskVT = Mask.getValueType();
EVT ElmtVT = VecVT.getVectorElementType();
const bool IsFixedLength = VecVT.isFixedLengthVector();
- const bool HasPassthru = !Passthru.isUndef();
+ const bool HasPassthru = !Passthru.isUndefOrPoison();
unsigned MinElmts = VecVT.getVectorElementCount().getKnownMinValue();
EVT FixedVecVT = MVT::getVectorVT(ElmtVT.getSimpleVT(), MinElmts);
@@ -12789,7 +12790,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
SmallVector<ShuffleSourceInfo, 2> Sources;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(V.getOperand(1)) ||
@@ -12823,7 +12824,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
unsigned OutputFactor = VT.getScalarSizeInBits() / 8;
for (unsigned I = 0; I < NumElts; ++I) {
SDValue V = Op.getOperand(I);
- if (V.isUndef()) {
+ if (V.isUndefOrPoison()) {
for (unsigned OF = 0; OF < OutputFactor; OF++)
Mask.push_back(-1);
continue;
@@ -13001,7 +13002,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
SDValue Entry = Op.getOperand(i);
- if (Entry.isUndef())
+ if (Entry.isUndefOrPoison())
continue;
auto Src = find(Sources, Entry.getOperand(0));
@@ -13566,7 +13567,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
unsigned BytesPerElt = EltVT.getSizeInBits() / 8;
bool Swap = false;
- if (V1.isUndef() || isZerosVector(V1.getNode())) {
+ if (V1.isUndefOrPoison() || isZerosVector(V1.getNode())) {
std::swap(V1, V2);
Swap = true;
}
@@ -13574,7 +13575,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
// If the V2 source is undef or zero then we can use a tbl1, as tbl1 will fill
// out of range values with 0s. We do need to make sure that any out-of-range
// values are really out-of-range for a v16i8 vector.
- bool IsUndefOrZero = V2.isUndef() || isZerosVector(V2.getNode());
+ bool IsUndefOrZero = V2.isUndefOrPoison() || isZerosVector(V2.getNode());
MVT IndexVT = MVT::v8i8;
unsigned IndexLen = 8;
if (Op.getValueSizeInBits() == 128) {
@@ -13898,7 +13899,8 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
Imm *= getExtFactor(V1);
return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
DAG.getConstant(Imm, dl, MVT::i32));
- } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
+ } else if (V2->isUndefOrPoison() &&
+ isSingletonEXTMask(ShuffleMask, VT, Imm)) {
Imm *= getExtFactor(V1);
return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
DAG.getConstant(Imm, dl, MVT::i32));
@@ -14546,7 +14548,7 @@ static SDValue NormalizeBuildVector(SDValue Op,
Lane = DAG.getConstant(
CstLane->getAPIntValue().trunc(EltTy.getSizeInBits()).getZExtValue(),
dl, MVT::i32);
- } else if (Lane.getNode()->isUndef()) {
+ } else if (Lane.getNode()->isUndefOrPoison()) {
Lane = DAG.getUNDEF(MVT::i32);
} else {
assert(Lane.getValueType() == MVT::i32 &&
@@ -14663,9 +14665,10 @@ SDValue AArch64TargetLowering::LowerFixedLengthBuildVectorToSVE(
SDValue ZeroI64 = DAG.getConstant(0, DL, MVT::i64);
SmallVector<SDValue, 16> Intermediates = map_to_vector<16>(
Op->op_values(), [&, Undef = DAG.getUNDEF(ContainerVT)](SDValue Op) {
- return Op.isUndef() ? Undef
- : DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
- ContainerVT, Undef, Op, ZeroI64);
+ return Op.isUndefOrPoison()
+ ? Undef
+ : DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Undef,
+ Op, ZeroI64);
});
ElementCount ZipEC = ContainerVT.getVectorElementCount();
@@ -14676,8 +14679,9 @@ SDValue AArch64TargetLowering::LowerFixedLengthBuildVectorToSVE(
SDValue Op0 = DAG.getBitcast(ZipVT, Intermediates[I + 0]);
SDValue Op1 = DAG.getBitcast(ZipVT, Intermediates[I + 1]);
Intermediates[I / 2] =
- Op1.isUndef() ? Op0
- : DAG.getNode(AArch64ISD::ZIP1, DL, ZipVT, Op0, Op1);
+ Op1.isUndefOrPoison()
+ ? Op0
+ : DAG.getNode(AArch64ISD::ZIP1, DL, ZipVT, Op0, Op1);
}
Intermediates.resize(Intermediates.size() / 2);
@@ -14757,7 +14761,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
SDValue V = Op.getOperand(i);
if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
AllLanesExtractElt = false;
- if (V.isUndef()) {
+ if (V.isUndefOrPoison()) {
++NumUndefLanes;
continue;
}
@@ -15102,7 +15106,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
// vector element types. After type-legalization the inserted value is
// extended (i32) and it is safe to cast them to the vector type by ignoring
// the upper bits of the lowest lane (e.g. v8i8, v4i16).
- if (!Op0.isUndef()) {
+ if (!Op0.isUndefOrPoison()) {
LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
++i;
@@ -15113,7 +15117,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
});
for (; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
@@ -15342,7 +15346,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
}
// We can select these directly.
- if (isTypeLegal(InVT) && Vec0.isUndef())
+ if (isTypeLegal(InVT) && Vec0.isUndefOrPoison())
return Op;
// Ensure the subvector is half the size of the main vector.
@@ -15386,7 +15390,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
if (Idx == 0 && isPackedVectorType(VT, DAG)) {
// This will be matched by custom code during ISelDAGToDAG.
- if (Vec0.isUndef())
+ if (Vec0.isUndefOrPoison())
return Op;
std::optional<unsigned> PredPattern =
@@ -18541,7 +18545,7 @@ static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
// Make sure all other operands are equally extended.
bool SeenZExtOrSExt = !IsAnyExt;
for (SDValue Op : drop_begin(BV->ops())) {
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
if (calculatePreExtendType(Op) != PreExtendType)
@@ -18569,14 +18573,15 @@ static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType;
SmallVector<SDValue, 8> NewOps;
for (SDValue Op : BV->ops())
- NewOps.push_back(Op.isUndef() ? DAG.getUNDEF(PreExtendLegalType)
- : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL,
- PreExtendLegalType));
+ NewOps.push_back(
+ Op.isUndefOrPoison()
+ ? DAG.getUNDEF(PreExtendLegalType)
+ : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, PreExtendLegalType));
NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps);
} else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE
EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType());
NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0),
- BV.getOperand(1).isUndef()
+ BV.getOperand(1).isUndefOrPoison()
? DAG.getUNDEF(PreExtendVT)
: BV.getOperand(1).getOperand(0),
cast<ShuffleVectorSDNode>(BV)->getMask());
@@ -19918,7 +19923,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
all_of(N->op_values(), [SrcVT](SDValue V) {
if (V.getValueType() != SrcVT)
return false;
- if (V.isUndef())
+ if (V.isUndefOrPoison())
return true;
LoadSDNode *LD = dyn_cast<LoadSDNode>(V);
return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() &&
@@ -19930,7 +19935,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
for (unsigned i = 0; i < N->getNumOperands(); i++) {
SDValue V = N->getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
Ops.push_back(DAG.getUNDEF(FVT));
else {
LoadSDNode *LD = cast<LoadSDNode>(V);
@@ -19991,7 +19996,8 @@ static SDValue performConcatVectorsCombine(SDNode *N,
SDValue N10 = N1->getOperand(0);
SDValue N11 = N1->getOperand(1);
- if (!N00.isUndef() && !N01.isUndef() && !N10.isUndef() && !N11.isUndef()) {
+ if (!N00.isUndefOrPoison() && !N01.isUndefOrPoison() &&
+ !N10.isUndefOrPoison() && !N11.isUndefOrPoison()) {
SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N00, N10);
SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N01, N11);
return DAG.getNode(N0Opc, dl, VT, Concat0, Concat1);
@@ -20028,10 +20034,10 @@ static SDValue performConcatVectorsCombine(SDNode *N,
if (N->getNumOperands() == 2 && IsRSHRN(N0) &&
((IsRSHRN(N1) &&
N0.getConstantOperandVal(1) == N1.getConstantOperandVal(1)) ||
- N1.isUndef())) {
+ N1.isUndefOrPoison())) {
SDValue X = N0.getOperand(0).getOperand(0);
- SDValue Y = N1.isUndef() ? DAG.getUNDEF(X.getValueType())
- : N1.getOperand(0).getOperand(0);
+ SDValue Y = N1.isUndefOrPoison() ? DAG.getUNDEF(X.getValueType())
+ : N1.getOperand(0).getOperand(0);
EVT BVT =
X.getValueType().getDoubleNumVectorElementsVT(*DCI.DAG.getContext());
SDValue CC = DAG.getNode(ISD::CONCAT_VECTORS, dl, BVT, X, Y);
@@ -20131,7 +20137,7 @@ performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
return SDValue();
// Ignore widening patterns.
- if (IdxVal == 0 && Vec.isUndef())
+ if (IdxVal == 0 && Vec.isUndefOrPoison())
return SDValue();
// Subvector must be half the width and an "aligned" insertion.
@@ -22362,7 +22368,7 @@ static SDValue performZExtDeinterleaveShuffleCombine(SDNode *N,
bool IsUndefDeInterleave = false;
if (!IsDeInterleave)
IsUndefDeInterleave =
- Shuffle->getOperand(1).isUndef() &&
+ Shuffle->getOperand(1).isUndefOrPoison() &&
all_of(
Shuffle->getMask().slice(ExtOffset, VT.getVectorNumElements() / 2),
[](int M) { return M < 0; }) &&
@@ -22899,7 +22905,7 @@ static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");
// splice(pg, op1, undef) -> op1
- if (N->getOperand(2).isUndef())
+ if (N->getOperand(2).isUndefOrPoison())
return N->getOperand(1);
return SDValue();
@@ -22912,7 +22918,7 @@ static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
"Unexpected Opcode!");
// uunpklo/hi undef -> undef
- if (N->getOperand(0).isUndef())
+ if (N->getOperand(0).isUndefOrPoison())
return DAG.getUNDEF(N->getValueType(0));
// If this is a masked load followed by an UUNPKLO, fold this into a masked
@@ -22926,7 +22932,7 @@ static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
if (MLD->isUnindexed() && MLD->getExtensionType() != ISD::SEXTLOAD &&
SDValue(MLD, 0).hasOneUse() && Mask->getOpcode() == AArch64ISD::PTRUE &&
- (MLD->getPassThru()->isUndef() ||
+ (MLD->getPassThru()->isUndefOrPoison() ||
isZerosVector(MLD->getPassThru().getNode()))) {
unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
unsigned PgPattern = Mask->getConstantOperandVal(0);
@@ -23521,7 +23527,7 @@ static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
MachineMemOperand *MMO = LD->getMemOperand();
- assert(LD->getOffset().isUndef() && "undef offset expected");
+  assert(LD->getOffset().isUndefOrPoison() && "undef or poison offset expected");
// Load 2 x i8, then 1 x i8.
SDValue L16 = DAG.getLoad(MVT::i16, DL, Chain, BasePtr, MMO);
@@ -23808,7 +23814,7 @@ static SDValue combineI8TruncStore(StoreSDNode *ST, SelectionDAG &DAG,
ValueVT != EVT::getVectorVT(*DAG.getContext(), MVT::i8, 3))
return SDValue();
- assert(ST->getOffset().isUndef() && "undef offset expected");
+  assert(ST->getOffset().isUndefOrPoison() && "undef or poison offset expected");
SDLoc DL(ST);
auto WideVT = EVT::getVectorVT(
*DAG.getContext(),
@@ -26189,7 +26195,7 @@ static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) {
if (Insert.getOpcode() != ISD::INSERT_SUBVECTOR)
return SDValue();
- if (!Insert.getOperand(0).isUndef())
+ if (!Insert.getOperand(0).isUndefOrPoison())
return SDValue();
uint64_t IdxInsert = Insert.getConstantOperandVal(2);
@@ -26888,7 +26894,7 @@ bool AArch64TargetLowering::getIndexedAddressParts(SDNode *N, SDNode *Op,
}
auto IsUndefOrZero = [](SDValue V) {
- return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
+ return V.isUndefOrPoison() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
};
// If the only user of the value is a scalable vector splat, it is
@@ -26974,10 +26980,11 @@ static void replaceBoolVectorBitcast(SDNode *N,
// Special handling for Clang's __builtin_convertvector. For vectors with <8
// elements, it adds a vector concatenation with undef(s). If we encounter
// this here, we can skip the concat.
- if (Op.getOpcode() == ISD::CONCAT_VECTORS && !Op.getOperand(0).isUndef()) {
+ if (Op.getOpcode() == ISD::CONCAT_VECTORS &&
+ !Op.getOperand(0).isUndefOrPoison()) {
bool AllUndef = true;
for (unsigned I = 1; I < Op.getNumOperands(); ++I)
- AllUndef &= Op.getOperand(I).isUndef();
+ AllUndef &= Op.getOperand(I).isUndefOrPoison();
if (AllUndef)
Op = Op.getOperand(0);
@@ -27076,7 +27083,7 @@ static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
return;
}
- if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef())
+ if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndefOrPoison())
return;
// Check the mask is 1,0,3,2,5,4,...
@@ -28590,7 +28597,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
SDValue PassThru;
bool IsPassThruZeroOrUndef = false;
- if (Load->getPassThru()->isUndef()) {
+ if (Load->getPassThru()->isUndefOrPoison()) {
PassThru = DAG.getUNDEF(ContainerVT);
IsPassThruZeroOrUndef = true;
} else {
@@ -29604,7 +29611,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
unsigned MaxSVESize = Subtarget->getMaxSVEVectorSizeInBits();
if (MinSVESize == MaxSVESize && MaxSVESize == VT.getSizeInBits()) {
if (ShuffleVectorInst::isReverseMask(ShuffleMask, ShuffleMask.size()) &&
- Op2.isUndef()) {
+ Op2.isUndefOrPoison()) {
Op = DAG.getNode(ISD::VECTOR_REVERSE, DL, ContainerVT, Op1);
return convertFromScalableVector(DAG, VT, Op);
}
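
All of the AArch64 changes above follow one pattern: guards that tested a
shuffle or build-vector operand with isUndef() now use isUndefOrPoison(), so
the existing undef folds also fire for the new ISD::POISON node. As a rough
sketch of the assumed predicate semantics (the actual helper is the one this
patch adds in SelectionDAGNodes.h; the code below is an illustration, not a
copy of it):

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

// Sketch only: a value is undef-or-poison when its defining node is one of
// the two sentinel opcodes. Folding POISON like UNDEF is sound because
// poison places even fewer constraints on the result than undef does.
static bool isUndefOrPoisonSketch(llvm::SDValue V) {
  unsigned Opc = V.getOpcode();
  return Opc == llvm::ISD::UNDEF || Opc == llvm::ISD::POISON;
}
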
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index bd8d6079e1ba885..8c2e99369062d75 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -7837,7 +7837,7 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
SDValue FirstOp = Op.getOperand(0);
if (!isa<ConstantSDNode>(FirstOp) &&
llvm::all_of(llvm::drop_begin(Op->ops()), [&FirstOp](const SDUse &U) {
- return U.get().isUndef() || U.get() == FirstOp;
+ return U.get().isUndefOrPoison() || U.get() == FirstOp;
})) {
SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp,
DAG.getValueType(MVT::i1));
@@ -7848,9 +7848,9 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
unsigned Bits32 = 0;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (!isa<ConstantSDNode>(V) && !V.isUndef())
+ if (!isa<ConstantSDNode>(V) && !V.isUndefOrPoison())
continue;
- bool BitSet = V.isUndef() ? false : V->getAsZExtVal();
+ bool BitSet = V.isUndefOrPoison() ? false : V->getAsZExtVal();
if (BitSet)
Bits32 |= BoolMask << (i * BitsPerBool);
}
@@ -7860,7 +7860,7 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(Bits32, dl, MVT::i32));
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (isa<ConstantSDNode>(V) || V.isUndef())
+ if (isa<ConstantSDNode>(V) || V.isUndefOrPoison())
continue;
Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V,
DAG.getConstant(i, dl, MVT::i32));
@@ -8044,7 +8044,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SDValue Value;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
if (i > 0)
isOnlyLowElement = false;
@@ -8203,7 +8203,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SDValue Vec = DAG.getUNDEF(VT);
for (unsigned i = 0 ; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
@@ -8248,7 +8248,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
SmallVector<ShuffleSourceInfo, 2> Sources;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
// A shuffle can only come from building a vector from various
@@ -8377,7 +8377,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
SDValue Entry = Op.getOperand(i);
- if (Entry.isUndef())
+ if (Entry.isUndefOrPoison())
continue;
auto Src = llvm::find(Sources, Entry.getOperand(0));
@@ -8577,7 +8577,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
for (int I : ShuffleMask)
VTBLMask.push_back(DAG.getSignedConstant(I, DL, MVT::i32));
- if (V2.getNode()->isUndef())
+ if (V2.getNode()->isUndefOrPoison())
return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
@@ -8684,8 +8684,9 @@ static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
// fields in a register into 8 other arbitrary 2-bit fields!
SDValue PredAsVector1 = PromoteMVEPredVector(dl, V1, VT, DAG);
EVT NewVT = PredAsVector1.getValueType();
- SDValue PredAsVector2 = V2.isUndef() ? DAG.getUNDEF(NewVT)
- : PromoteMVEPredVector(dl, V2, VT, DAG);
+ SDValue PredAsVector2 = V2.isUndefOrPoison()
+ ? DAG.getUNDEF(NewVT)
+ : PromoteMVEPredVector(dl, V2, VT, DAG);
assert(PredAsVector2.getValueType() == NewVT &&
"Expected identical vector type in expanded i1 shuffle!");
@@ -8880,7 +8881,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
!isa<ConstantSDNode>(V1.getOperand(0))) {
bool IsScalarToVector = true;
for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
- if (!V1.getOperand(i).isUndef()) {
+ if (!V1.getOperand(i).isUndefOrPoison()) {
IsScalarToVector = false;
break;
}
@@ -8907,7 +8908,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
if (isVREVMask(ShuffleMask, VT, 16))
return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
- if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
+ if (ST->hasNEON() && V2->isUndefOrPoison() &&
+ isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
DAG.getConstant(Imm, dl, MVT::i32));
}
@@ -8955,7 +8957,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
// ->
// concat(VZIP(v1, v2):0, :1)
//
- if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
+ if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS &&
+ V2->isUndefOrPoison()) {
SDValue SubV1 = V1->getOperand(0);
SDValue SubV2 = V1->getOperand(1);
EVT SubVT = SubV1.getValueType();
@@ -9269,11 +9272,11 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
SDValue Val = DAG.getUNDEF(MVT::v2f64);
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
- if (!Op0.isUndef())
+ if (!Op0.isUndefOrPoison())
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
DAG.getIntPtrConstant(0, dl));
- if (!Op1.isUndef())
+ if (!Op1.isUndefOrPoison())
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
DAG.getIntPtrConstant(1, dl));
@@ -10315,7 +10318,7 @@ static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST ||
PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) &&
isZeroVector(PassThru->getOperand(0));
- if (!PassThru.isUndef() && !PassThruIsCastZero)
+ if (!PassThru.isUndefOrPoison() && !PassThruIsCastZero)
Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
}
@@ -13535,7 +13538,7 @@ static SDValue PerformVSetCCToVCTPCombine(SDNode *N,
// Check first operand is BuildVector of 0,1,2,...
for (unsigned I = 0; I < VT.getVectorNumElements(); I++) {
- if (!Op0.getOperand(I).isUndef() &&
+ if (!Op0.getOperand(I).isUndefOrPoison() &&
!(isa<ConstantSDNode>(Op0.getOperand(I)) &&
Op0.getConstantOperandVal(I) == I))
return SDValue();
@@ -15399,7 +15402,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
// Assume only bit cast to i32 will go away.
if (Elt->getOperand(0).getValueType() == MVT::i32)
++NumOfBitCastedElts;
- } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
+ } else if (Elt.isUndefOrPoison() || isa<ConstantSDNode>(Elt))
// Constants are statically casted, thus do not count them as
// relevant operands.
--NumOfRelevantElts;
@@ -15426,7 +15429,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
SDLoc dl(N);
for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
SDValue V = N->getOperand(Idx);
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
if (V.getOpcode() == ISD::BITCAST &&
V->getOperand(0).getValueType() == MVT::i32)
@@ -15494,7 +15497,7 @@ static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG,
if (Op.getValueType() == VT)
return Op;
// VECTOR_REG_CAST undef -> undef
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
return DAG.getUNDEF(VT);
// VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x)
@@ -15719,7 +15722,7 @@ PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
return SDValue();
// Ignore widening patterns.
- if (IdxVal == 0 && Vec.isUndef())
+ if (IdxVal == 0 && Vec.isUndefOrPoison())
return SDValue();
// Subvector must be half the width and an "aligned" insertion.
@@ -15750,7 +15753,8 @@ static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N,
SelectionDAG &DAG) {
SDValue Trunc = N->getOperand(0);
EVT VT = Trunc.getValueType();
- if (Trunc.getOpcode() != ARMISD::MVETRUNC || !N->getOperand(1).isUndef())
+ if (Trunc.getOpcode() != ARMISD::MVETRUNC ||
+ !N->getOperand(1).isUndefOrPoison())
return SDValue();
SDLoc DL(Trunc);
@@ -15794,7 +15798,7 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
SDValue Concat0Op1 = Op0.getOperand(1);
SDValue Concat1Op1 = Op1.getOperand(1);
- if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
+ if (!Concat0Op1.isUndefOrPoison() || !Concat1Op1.isUndefOrPoison())
return SDValue();
// Skip the transformation if any of the types are illegal.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -16715,7 +16719,7 @@ static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) {
ArrayRef<int> M = SVN->getMask();
unsigned NumElts = ToVT.getVectorNumElements();
- if (SVN->getOperand(1).isUndef())
+ if (SVN->getOperand(1).isUndefOrPoison())
NumElts /= 2;
unsigned Off0 = Rev ? NumElts : 0;
@@ -17404,7 +17408,7 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) {
unsigned VecOp = N->getOperand(0).getValueType().isVector() ? 0 : 2;
auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(VecOp));
- if (!Shuf || !Shuf->getOperand(1).isUndef())
+ if (!Shuf || !Shuf->getOperand(1).isUndefOrPoison())
return SDValue();
// Check all elements are used once in the mask.
@@ -17420,7 +17424,8 @@ static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) {
if (N->getNumOperands() != VecOp + 1) {
auto *Shuf2 = dyn_cast<ShuffleVectorSDNode>(N->getOperand(VecOp + 1));
- if (!Shuf2 || !Shuf2->getOperand(1).isUndef() || Shuf2->getMask() != Mask)
+ if (!Shuf2 || !Shuf2->getOperand(1).isUndefOrPoison() ||
+ Shuf2->getMask() != Mask)
return SDValue();
}
@@ -17443,9 +17448,9 @@ static SDValue PerformVMOVNCombine(SDNode *N,
// VMOVNT a undef -> a
// VMOVNB a undef -> a
// VMOVNB undef a -> a
- if (Op1->isUndef())
+ if (Op1->isUndefOrPoison())
return Op0;
- if (Op0->isUndef() && !IsTop)
+ if (Op0->isUndefOrPoison() && !IsTop)
return Op1;
// VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b)
@@ -17500,7 +17505,8 @@ static SDValue PerformVQDMULHCombine(SDNode *N,
auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(RHS);
// Turn VQDMULH(shuffle, shuffle) -> shuffle(VQDMULH)
if (Shuf0 && Shuf1 && Shuf0->getMask().equals(Shuf1->getMask()) &&
- LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
+ LHS.getOperand(1).isUndefOrPoison() &&
+ RHS.getOperand(1).isUndefOrPoison() &&
(LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
SDLoc DL(N);
SDValue NewBinOp = DCI.DAG.getNode(N->getOpcode(), DL, VT,
@@ -18658,7 +18664,7 @@ SDValue ARMTargetLowering::PerformMVETruncCombine(
SDLoc DL(N);
// MVETrunc(Undef, Undef) -> Undef
- if (all_of(N->ops(), [](SDValue Op) { return Op.isUndef(); }))
+ if (all_of(N->ops(), [](SDValue Op) { return Op.isUndefOrPoison(); }))
return DAG.getUNDEF(VT);
// MVETrunc(MVETrunc a b, MVETrunc c, d) -> MVETrunc
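
The ARM hunks rely on the same soundness argument: any rewrite that was legal
for an undef operand stays legal for a poison operand. Taking the VMOVN fold
above as the worked example, the updated guard behaves as in this sketch
(illustration only; foldVMOVNSketch is a hypothetical name, not the patch's
function):

// Hypothetical restatement of the VMOVN fold with the widened predicate:
// VMOVNT a undef -> a, VMOVNB a undef -> a, VMOVNB undef a -> a,
// where "undef" now also covers ISD::POISON operands.
static llvm::SDValue foldVMOVNSketch(llvm::SDNode *N, bool IsTop) {
  llvm::SDValue Op0 = N->getOperand(0), Op1 = N->getOperand(1);
  if (Op1.isUndefOrPoison()) // previously Op1->isUndef()
    return Op0;
  if (Op0.isUndefOrPoison() && !IsTop)
    return Op1;
  return llvm::SDValue(); // no fold applies
}
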
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index bdc1ac7c7da5891..45abd25183d1839 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2345,7 +2345,7 @@ bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
// If both vector operands for the shuffle are the same vector, the mask will
// contain only elements from the first one and the second one will be undef.
- if (N->getOperand(1).isUndef()) {
+ if (N->getOperand(1).isUndefOrPoison()) {
ShiftElts = 0;
Swap = true;
unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
@@ -2385,7 +2385,7 @@ bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
// If both vector operands for the shuffle are the same vector, the mask will
// contain only elements from the first one and the second one will be undef.
- if (N->getOperand(1).isUndef()) {
+ if (N->getOperand(1).isUndefOrPoison()) {
assert(M0 < 4 && "Indexing into an undef vector?");
if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
return false;
@@ -2483,7 +2483,7 @@ bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
// If both vector operands for the shuffle are the same vector, the mask will
// contain only elements from the first one and the second one will be undef.
- if (N->getOperand(1).isUndef()) {
+ if (N->getOperand(1).isUndefOrPoison()) {
if ((M0 | M1) < 2) {
DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
Swap = false;
@@ -2560,7 +2560,8 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
// See if all of the elements in the buildvector agree across.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- if (N->getOperand(i).isUndef()) continue;
+ if (N->getOperand(i).isUndefOrPoison())
+ continue;
// If the element isn't a constant, bail fully out.
if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
@@ -2605,7 +2606,8 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
// Check to see if this buildvec has a single non-undef value in its elements.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- if (N->getOperand(i).isUndef()) continue;
+ if (N->getOperand(i).isUndefOrPoison())
+ continue;
if (!OpVal.getNode())
OpVal = N->getOperand(i);
else if (OpVal != N->getOperand(i))
@@ -8629,7 +8631,7 @@ bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
return false;
RLI.Ptr = LD->getBasePtr();
- if (LD->isIndexed() && !LD->getOffset().isUndef()) {
+ if (LD->isIndexed() && !LD->getOffset().isUndefOrPoison()) {
assert(LD->getAddressingMode() == ISD::PRE_INC &&
"Non-pre-inc AM on PPC?");
RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
@@ -9456,7 +9458,7 @@ static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
if (V->isConstant())
return false;
for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
- if (V->getOperand(i).isUndef())
+ if (V->getOperand(i).isUndefOrPoison())
return false;
// We want to expand nodes that represent load-and-splat even if the
// loaded value is a floating point truncation or conversion to int.
@@ -9676,7 +9678,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// BUILD_VECTOR is a separate use of the value.
unsigned NumUsesOfInputLD = 128 / ElementSize;
for (SDValue BVInOp : Op->ops())
- if (BVInOp.isUndef())
+ if (BVInOp.isUndefOrPoison())
NumUsesOfInputLD--;
// Exclude some cases where LD_SPLAT is worse than scalar_to_vector:
@@ -10007,7 +10009,7 @@ SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
unsigned CurrentElement = Mask[i];
// If 2nd operand is undefined, we should only look for element 7 in the
// Mask.
- if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
+ if (V2.isUndefOrPoison() && CurrentElement != VINSERTBSrcElem)
continue;
bool OtherElementsInOrder = true;
@@ -10019,8 +10021,9 @@ SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
// If CurrentElement is from V1 [0,15], then we expect the rest of the Mask
// to be from V2 [16,31] and vice versa, unless the 2nd operand is undefined,
// in which case we assume we're always picking from the 1st operand.
- int MaskOffset =
- (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
+ int MaskOffset = (!V2.isUndefOrPoison() && CurrentElement < BytesInVector)
+ ? BytesInVector
+ : 0;
if (Mask[j] != OriginalOrder[j] + MaskOffset) {
OtherElementsInOrder = false;
break;
@@ -10031,7 +10034,7 @@ SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
// in the vector we should insert into.
if (OtherElementsInOrder) {
// If 2nd operand is undefined, we assume no shifts and no swapping.
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
ShiftElts = 0;
Swap = false;
} else {
@@ -10053,7 +10056,7 @@ SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
// optionally with VECSHL if shift is required.
if (Swap)
std::swap(V1, V2);
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
V2 = V1;
if (ShiftElts) {
SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
@@ -10122,7 +10125,7 @@ SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
// If both vector operands for the shuffle are the same vector, the mask
// will contain only elements from the first one and the second one will be
// undef.
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
ShiftElts = 0;
unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
TargetOrder = OriginalOrderLow;
@@ -10159,7 +10162,7 @@ SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
// optionally with VECSHL if shift is required.
if (Swap)
std::swap(V1, V2);
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
V2 = V1;
SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
if (ShiftElts) {
@@ -10314,7 +10317,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// combine it because that will just produce multiple loads.
bool IsPermutedLoad = false;
const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
- if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
+ if (InputLoad && Subtarget.hasVSX() && V2.isUndefOrPoison() &&
(PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
InputLoad->hasOneUse()) {
bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
@@ -10374,7 +10377,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
if (Subtarget.hasP9Vector() &&
PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
isLittleEndian)) {
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
V2 = V1;
else if (Swap)
std::swap(V1, V2);
@@ -10412,8 +10415,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
if (Swap)
std::swap(V1, V2);
SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
- SDValue Conv2 =
- DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
+ SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
+ V2.isUndefOrPoison() ? V1 : V2);
SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
DAG.getConstant(ShiftElts, dl, MVT::i32));
@@ -10425,8 +10428,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
if (Swap)
std::swap(V1, V2);
SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
- SDValue Conv2 =
- DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
+ SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
+ V2.isUndefOrPoison() ? V1 : V2);
SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
DAG.getConstant(ShiftElts, dl, MVT::i32));
@@ -10454,7 +10457,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
}
if (Subtarget.hasVSX()) {
- if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
+ if (V2.isUndefOrPoison() && PPC::isSplatShuffleMask(SVOp, 4)) {
int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
@@ -10464,7 +10467,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
}
// Left shifts of 8 bytes are actually swaps. Convert accordingly.
- if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
+ if (V2.isUndefOrPoison() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
@@ -10474,7 +10477,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// Cases that are handled by instructions that take permute immediates
// (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
// selected by the instruction selector.
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
if (PPC::isSplatShuffleMask(SVOp, 1) ||
PPC::isSplatShuffleMask(SVOp, 2) ||
PPC::isSplatShuffleMask(SVOp, 4) ||
@@ -10575,7 +10578,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
// vector that will get spilled to the constant pool.
- if (V2.isUndef()) V2 = V1;
+ if (V2.isUndefOrPoison())
+ V2 = V1;
return LowerVPERM(Op, DAG, PermMask, VT, V1, V2);
}
@@ -15013,7 +15017,7 @@ combineElementTruncationToVectorTruncation(SDNode *N,
if (Is32Bit) {
// For 32-bit values, we need to add an FP_ROUND node (if we made it
// here, we know that all inputs are extending loads so this is safe).
- if (In.isUndef())
+ if (In.isUndefOrPoison())
Ops.push_back(DAG.getUNDEF(SrcVT));
else {
SDValue Trunc =
@@ -15022,7 +15026,8 @@ combineElementTruncationToVectorTruncation(SDNode *N,
Ops.push_back(Trunc);
}
} else
- Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
+ Ops.push_back(In.isUndefOrPoison() ? DAG.getUNDEF(SrcVT)
+ : In.getOperand(0));
}
unsigned Opcode;
@@ -15715,13 +15720,13 @@ static bool isSplatBV(SDValue Op) {
// Find first non-undef input.
for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
FirstOp = Op.getOperand(i);
- if (!FirstOp.isUndef())
+ if (!FirstOp.isUndefOrPoison())
break;
}
// All inputs are undef or the same as the first non-undef input.
for (int i = 1, e = Op.getNumOperands(); i < e; i++)
- if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
+ if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndefOrPoison())
return false;
return true;
}
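
The final PowerPC hunk generalizes the splat check in isSplatBV; restated
compactly, a BUILD_VECTOR is now treated as a splat when every lane is either
undef/poison or equal to the first defined lane. A sketch under the same
caveats as above (isSplatBVSketch is a hypothetical name):

// Hypothetical restatement of the generalized splat test.
static bool isSplatBVSketch(llvm::SDValue Op) {
  llvm::SDValue FirstOp;
  // Find the first operand that is neither undef nor poison.
  for (llvm::SDValue V : Op->op_values())
    if (!V.isUndefOrPoison()) {
      FirstOp = V;
      break;
    }
  if (!FirstOp.getNode())
    return true; // every lane is undef/poison
  // All other lanes must match it or be undef/poison themselves.
  for (llvm::SDValue V : Op->op_values())
    if (V != FirstOp && !V.isUndefOrPoison())
      return false;
  return true;
}
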
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 13ce566f8def6c3..27620ab364a0cba 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3348,7 +3348,7 @@ getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op,
SDValue Offset, SDValue Mask, SDValue VL,
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
- if (Passthru.isUndef())
+ if (Passthru.isUndefOrPoison())
Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
@@ -3360,7 +3360,7 @@ getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL,
EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask,
SDValue VL,
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
- if (Passthru.isUndef())
+ if (Passthru.isUndefOrPoison())
Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
@@ -3432,7 +3432,7 @@ static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op,
SmallVector<std::optional<APInt>> Elts(Op.getNumOperands());
const unsigned OpSize = Op.getScalarValueSizeInBits();
for (auto [Idx, Elt] : enumerate(Op->op_values())) {
- if (Elt.isUndef()) {
+ if (Elt.isUndefOrPoison()) {
Elts[Idx] = std::nullopt;
continue;
}
@@ -3599,8 +3599,8 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
SDValue DominantValue;
unsigned MostCommonCount = 0;
DenseMap<SDValue, unsigned> ValueCounts;
- unsigned NumUndefElts =
- count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
+ unsigned NumUndefElts = count_if(
+ Op->op_values(), [](const SDValue &V) { return V.isUndefOrPoison(); });
// Track the number of scalar loads we know we'd be inserting, estimated as
// any non-zero floating-point constant. Other kinds of element are either
@@ -3610,7 +3610,7 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
unsigned NumScalarLoads = 0;
for (SDValue V : Op->op_values()) {
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
unsigned &Count = ValueCounts[V];
@@ -3648,7 +3648,7 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
// is also better than using vmerge.vx as it avoids the need to
// materialize the mask in a vector register.
if (SDValue LastOp = Op->getOperand(Op->getNumOperands() - 1);
- !LastOp.isUndef() && ValueCounts[LastOp] == 1 &&
+ !LastOp.isUndefOrPoison() && ValueCounts[LastOp] == 1 &&
LastOp != DominantValue) {
Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
auto OpCode =
@@ -3664,7 +3664,7 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
for (const auto &OpIdx : enumerate(Op->ops())) {
const SDValue &V = OpIdx.value();
- if (V.isUndef() || !Processed.insert(V).second)
+ if (V.isUndefOrPoison() || !Processed.insert(V).second)
continue;
if (ValueCounts[V] == 1) {
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
@@ -3741,7 +3741,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (unsigned I = 0; I < NumElts;) {
SDValue V = Op.getOperand(I);
- bool BitValue = !V.isUndef() && V->getAsZExtVal();
+ bool BitValue = !V.isUndefOrPoison() && V->getAsZExtVal();
Bits |= ((uint64_t)BitValue << BitPos);
++BitPos;
++I;
@@ -3871,7 +3871,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
// Construct the amalgamated value at this larger vector type.
for (const auto &OpIdx : enumerate(Op->op_values())) {
const auto &SeqV = OpIdx.value();
- if (!SeqV.isUndef())
+ if (!SeqV.isUndefOrPoison())
SplatValue |=
((SeqV->getAsZExtVal() & EltMask) << (OpIdx.index() * EltBitSize));
}
@@ -3928,7 +3928,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
// Construct the amalgamated value which can be splatted as this larger
// vector type.
for (const auto &SeqV : Sequence) {
- if (!SeqV.isUndef())
+ if (!SeqV.isUndefOrPoison())
SplatValue |=
((SeqV->getAsZExtVal() & EltMask) << (EltIdx * EltBitSize));
EltIdx++;
@@ -4200,8 +4200,8 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
// vslide1down path, we should be able to fold the vselect into the final
// vslidedown (for the undef tail) for the first half w/ masking.
unsigned NumElts = VT.getVectorNumElements();
- unsigned NumUndefElts =
- count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
+ unsigned NumUndefElts = count_if(
+ Op->op_values(), [](const SDValue &V) { return V.isUndefOrPoison(); });
unsigned NumDefElts = NumElts - NumUndefElts;
if (NumDefElts >= 8 && NumDefElts > NumElts / 2 &&
ContainerVT.bitsLE(getLMUL1VT(ContainerVT))) {
@@ -4260,7 +4260,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
// we use for integer constants here?
unsigned UndefCount = 0;
for (const SDValue &V : Op->ops()) {
- if (V.isUndef()) {
+ if (V.isUndefOrPoison()) {
UndefCount++;
continue;
}
@@ -4286,7 +4286,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SDValue Vec;
UndefCount = 0;
for (SDValue V : Op->ops()) {
- if (V.isUndef()) {
+ if (V.isUndefOrPoison()) {
UndefCount++;
continue;
}
@@ -4367,7 +4367,7 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
// If the hi bits of the splat are undefined, then it's fine to just splat Lo
// even if it might be sign extended.
- if (Hi.isUndef())
+ if (Hi.isUndefOrPoison())
return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
// Fall back to a stack store and stride x0 vector load.
@@ -4393,7 +4393,7 @@ static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
MVT VT, const SDLoc &DL, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
- bool HasPassthru = Passthru && !Passthru.isUndef();
+ bool HasPassthru = Passthru && !Passthru.isUndefOrPoison();
if (!HasPassthru && !Passthru)
Passthru = DAG.getUNDEF(VT);
@@ -4501,7 +4501,7 @@ static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL, MVT VT,
static SDValue getSingleShuffleSrc(MVT VT, MVT ContainerVT, SDValue V1,
SDValue V2) {
- if (V2.isUndef() &&
+ if (V2.isUndefOrPoison() &&
RISCVTargetLowering::getLMUL(ContainerVT) != RISCVII::VLMUL::LMUL_8)
return V1;
@@ -4933,9 +4933,9 @@ static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV,
// FIXME: Not only does this optimize the code, it fixes some correctness
// issues because MIR does not have freeze.
- if (EvenV.isUndef())
+ if (EvenV.isUndefOrPoison())
return getWideningSpread(OddV, 2, 1, DL, DAG);
- if (OddV.isUndef())
+ if (OddV.isUndefOrPoison())
return getWideningSpread(EvenV, 2, 0, DL, DAG);
MVT VecVT = EvenV.getSimpleValueType();
@@ -5032,7 +5032,7 @@ static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN,
if (!ShuffleVectorInst::isReverseMask(SVN->getMask(),
SVN->getMask().size()) ||
- !SVN->getOperand(1).isUndef())
+ !SVN->getOperand(1).isUndefOrPoison())
return SDValue();
unsigned ViaEltSize = std::max((uint64_t)8, PowerOf2Ceil(NumElts));
@@ -5404,8 +5404,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
// Promote i1 shuffle to i8 shuffle.
MVT WidenVT = MVT::getVectorVT(MVT::i8, VT.getVectorElementCount());
V1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V1);
- V2 = V2.isUndef() ? DAG.getUNDEF(WidenVT)
- : DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V2);
+ V2 = V2.isUndefOrPoison() ? DAG.getUNDEF(WidenVT)
+ : DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V2);
SDValue Shuffled = DAG.getVectorShuffle(WidenVT, DL, V1, V2, SVN->getMask());
return DAG.getSetCC(DL, VT, Shuffled, DAG.getConstant(0, DL, WidenVT),
ISD::SETNE);
@@ -5555,7 +5555,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
return convertFromScalableVector(VT, Res, DAG, Subtarget);
}
- if (ShuffleVectorInst::isReverseMask(Mask, NumElts) && V2.isUndef())
+ if (ShuffleVectorInst::isReverseMask(Mask, NumElts) && V2.isUndefOrPoison())
return DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V1);
// If this is a deinterleave(2,4,8) and we can widen the vector, then we can
@@ -5620,8 +5620,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
// Handle any remaining single source shuffles
- assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
- if (V2.isUndef()) {
+ assert(!V1.isUndefOrPoison() && "Unexpected shuffle canonicalization");
+ if (V2.isUndefOrPoison()) {
// We might be able to express the shuffle as a bitrotate. But even if we
// don't have Zvkb and have to expand, the expanded sequence of approx. 2
// shifts and a vor will have a higher throughput than a vrgather.
@@ -7398,7 +7398,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
for (const auto &OpIdx : enumerate(Op->ops())) {
SDValue SubVec = OpIdx.value();
// Don't insert undef subvectors.
- if (SubVec.isUndef())
+ if (SubVec.isUndefOrPoison())
continue;
Vec =
DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
@@ -9327,7 +9327,7 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
Vec, Vec, ValLo, I32Mask, InsertI64VL);
// If the source vector is undef, don't pass along the tail elements from
// the previous slide1down.
- SDValue Tail = Vec.isUndef() ? Vec : ValInVec;
+ SDValue Tail = Vec.isUndefOrPoison() ? Vec : ValInVec;
ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
Tail, ValInVec, ValHi, I32Mask, InsertI64VL);
// Bitcast back to the right container type.
@@ -9691,7 +9691,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
// Assume Policy operand is the last operand.
uint64_t Policy = Operands[NumOps - 1]->getAsZExtVal();
// We don't need to select maskedoff if it's undef.
- if (MaskedOff.isUndef())
+ if (MaskedOff.isUndefOrPoison())
return Vec;
// TAMU
if (Policy == RISCVII::TAIL_AGNOSTIC)
@@ -9975,7 +9975,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue VL = getVLOperand(Op);
SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
- if (Op.getOperand(1).isUndef())
+ if (Op.getOperand(1).isUndefOrPoison())
return SplattedVal;
SDValue SplattedIdx =
DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
@@ -10646,7 +10646,7 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
unsigned OrigIdx = Op.getConstantOperandVal(2);
const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
- if (OrigIdx == 0 && Vec.isUndef())
+ if (OrigIdx == 0 && Vec.isUndefOrPoison())
return Op;
// We don't have the ability to slide mask vectors up indexed by their i1
@@ -10784,7 +10784,7 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
// subregister operation). See below for how our VSLIDEUP works. We go via a
// LMUL=1 type to avoid allocating a large register group to hold our
// subvector.
- if (RemIdx.isZero() && (ExactlyVecRegSized || Vec.isUndef())) {
+ if (RemIdx.isZero() && (ExactlyVecRegSized || Vec.isUndefOrPoison())) {
if (SubVecVT.isFixedLengthVector()) {
// We may get NoSubRegister if inserting at index 0 and the subvec
// container is the same as the vector, e.g. vec=v4i32,subvec=v4i32,idx=0
@@ -14073,7 +14073,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
SDValue ScalarV = Reduce.getOperand(2);
EVT ScalarVT = ScalarV.getValueType();
if (ScalarV.getOpcode() == ISD::INSERT_SUBVECTOR &&
- ScalarV.getOperand(0)->isUndef() &&
+ ScalarV.getOperand(0)->isUndefOrPoison() &&
isNullConstant(ScalarV.getOperand(2)))
ScalarV = ScalarV.getOperand(1);
@@ -15532,7 +15532,8 @@ struct NodeExtensionHelper {
"Unexpected Opcode");
// The passthru must be undef for the tail policy to be agnostic.
- if (Opc == RISCVISD::VMV_V_X_VL && !OrigOperand.getOperand(0).isUndef())
+ if (Opc == RISCVISD::VMV_V_X_VL &&
+ !OrigOperand.getOperand(0).isUndefOrPoison())
return;
// Get the scalar value.
@@ -15638,7 +15639,7 @@ struct NodeExtensionHelper {
case RISCVISD::VFMV_V_F_VL: {
MVT VT = OrigOperand.getSimpleValueType();
- if (!OrigOperand.getOperand(0).isUndef())
+ if (!OrigOperand.getOperand(0).isUndefOrPoison())
break;
SDValue Op = OrigOperand.getOperand(1);
@@ -16176,7 +16177,7 @@ static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {
// Passthru should be undef
SDValue Passthru = N->getOperand(2);
- if (!Passthru.isUndef())
+ if (!Passthru.isUndefOrPoison())
return SDValue();
// Mask should be all ones
@@ -16188,7 +16189,7 @@ static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {
SDValue Z = MergeOp->getOperand(2);
if (Z.getOpcode() == ISD::INSERT_SUBVECTOR &&
- (isNullOrNullSplat(Z.getOperand(0)) || Z.getOperand(0).isUndef()))
+ (isNullOrNullSplat(Z.getOperand(0)) || Z.getOperand(0).isUndefOrPoison()))
Z = Z.getOperand(1);
if (!ISD::isConstantSplatVectorAllZeros(Z.getNode()))
@@ -17345,7 +17346,7 @@ static SDValue performBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
SmallVector<SDValue> LHSOps;
SmallVector<SDValue> RHSOps;
for (SDValue Op : N->ops()) {
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
// We can't form a divide or remainder from undef.
if (!DAG.isSafeToSpeculativelyExecute(Opcode))
return SDValue();
@@ -17646,7 +17647,7 @@ static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
if (N->getOpcode() == RISCVISD::ADD_VL) {
SDValue AddPassthruOp = N->getOperand(2);
- if (!AddPassthruOp.isUndef())
+ if (!AddPassthruOp.isUndefOrPoison())
return SDValue();
}
@@ -17669,7 +17670,7 @@ static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
SDValue MulPassthruOp = MulOp.getOperand(2);
- if (!MulPassthruOp.isUndef())
+ if (!MulPassthruOp.isUndefOrPoison())
return SDValue();
auto [AddMask, AddVL] = [](SDNode *N, SelectionDAG &DAG,
@@ -17754,7 +17755,7 @@ static bool matchIndexAsShuffle(EVT VT, SDValue Index, SDValue Mask,
for (unsigned i = 0; i < Index->getNumOperands(); i++) {
// TODO: We've found an active bit of UB, and could be
// more aggressive here if desired.
- if (Index->getOperand(i)->isUndef())
+ if (Index->getOperand(i)->isUndefOrPoison())
return false;
uint64_t C = Index->getConstantOperandVal(i);
if (C % ElementSize != 0)
@@ -17797,7 +17798,7 @@ static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
for (unsigned i = 0; i < Index->getNumOperands(); i++) {
// TODO: We've found an active bit of UB, and could be
// more aggressive here if desired.
- if (Index->getOperand(i)->isUndef())
+ if (Index->getOperand(i)->isUndefOrPoison())
return false;
// TODO: This offset check is too strict if we support fully
// misaligned memory operations.
@@ -17886,14 +17887,15 @@ static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
auto MatchMinMax = [&VL, &Mask](SDValue V, unsigned Opc, unsigned OpcVL,
APInt &SplatVal) {
if (V.getOpcode() != Opc &&
- !(V.getOpcode() == OpcVL && V.getOperand(2).isUndef() &&
+ !(V.getOpcode() == OpcVL && V.getOperand(2).isUndefOrPoison() &&
V.getOperand(3) == Mask && V.getOperand(4) == VL))
return SDValue();
SDValue Op = V.getOperand(1);
// Peek through conversion between fixed and scalable vectors.
- if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
+ if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ Op.getOperand(0).isUndefOrPoison() &&
isNullConstant(Op.getOperand(2)) &&
Op.getOperand(1).getValueType().isFixedLengthVector() &&
Op.getOperand(1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
@@ -17904,8 +17906,8 @@ static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
if (ISD::isConstantSplatVector(Op.getNode(), SplatVal))
return V.getOperand(0);
- if (Op.getOpcode() == RISCVISD::VMV_V_X_VL && Op.getOperand(0).isUndef() &&
- Op.getOperand(2) == VL) {
+ if (Op.getOpcode() == RISCVISD::VMV_V_X_VL &&
+ Op.getOperand(0).isUndefOrPoison() && Op.getOperand(2) == VL) {
if (auto *Op1 = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
SplatVal =
Op1->getAPIntValue().sextOrTrunc(Op.getScalarValueSizeInBits());
@@ -18152,7 +18154,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
if (Op0->getOpcode() == RISCVISD::BuildPairF64)
return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
- if (Op0->isUndef()) {
+ if (Op0->isUndefOrPoison()) {
SDValue Lo = DAG.getUNDEF(MVT::i32);
SDValue Hi = DAG.getUNDEF(MVT::i32);
return DCI.CombineTo(N, Lo, Hi);
@@ -18859,7 +18861,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
APInt NewC(Val.getValueSizeInBits(), 0);
uint64_t EltSize = Val.getScalarValueSizeInBits();
for (unsigned i = 0; i < Val.getNumOperands(); i++) {
- if (Val.getOperand(i).isUndef())
+ if (Val.getOperand(i).isUndefOrPoison())
continue;
NewC.insertBits(Val.getConstantOperandAPInt(i).trunc(EltSize),
i * EltSize);
@@ -18972,7 +18974,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
// scalar input.
unsigned ScalarSize = Scalar.getValueSizeInBits();
unsigned EltWidth = VT.getScalarSizeInBits();
- if (ScalarSize > EltWidth && Passthru.isUndef())
+ if (ScalarSize > EltWidth && Passthru.isUndefOrPoison())
if (SimplifyDemandedLowBitsHelper(1, EltWidth))
return SDValue(N, 0);
@@ -18991,7 +18993,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
// Try to remove vector->scalar->vector if the scalar->vector is inserting
// into an undef vector.
// TODO: Could use a vslide or vmv.v.v for non-undef.
- if (N->getOperand(0).isUndef() &&
+ if (N->getOperand(0).isUndefOrPoison() &&
Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
isNullConstant(Src.getOperand(1)) &&
Src.getOperand(0).getValueType().isScalableVector()) {
@@ -19011,7 +19013,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Scalar = N->getOperand(1);
SDValue VL = N->getOperand(2);
- if (Scalar.getOpcode() == RISCVISD::VMV_X_S && Passthru.isUndef() &&
+ if (Scalar.getOpcode() == RISCVISD::VMV_X_S && Passthru.isUndefOrPoison() &&
Scalar.getOperand(0).getValueType() == N->getValueType(0))
return Scalar.getOperand(0);
@@ -19033,7 +19035,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
// no purpose.
if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
Const && !Const->isZero() && isInt<5>(Const->getSExtValue()) &&
- VT.bitsLE(getLMUL1VT(VT)) && Passthru.isUndef())
+ VT.bitsLE(getLMUL1VT(VT)) && Passthru.isUndefOrPoison())
return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
break;
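
For RISC-V the recurring payoff is vector policy selection: when the passthru
operand of a slide is undef or poison, no destination lanes need to be
preserved, so the cheaper agnostic tail/mask policies are always safe.
Sketched with the RISCVII constants used in the hunks above
(pickSlidePolicySketch is a hypothetical name):

// Hypothetical restatement of the policy choice in getVSlidedown/getVSlideup.
static unsigned pickSlidePolicySketch(llvm::SDValue Passthru,
                                      unsigned Requested) {
  if (Passthru.isUndefOrPoison())
    return llvm::RISCVII::TAIL_AGNOSTIC | llvm::RISCVII::MASK_AGNOSTIC;
  return Requested; // e.g. RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED
}
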
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 744e4e740cb2102..fd33017207e4054 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4086,7 +4086,8 @@ static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
Vec->ops().slice(IdxVal, ElemsPerChunk));
// Check if we're extracting the upper undef of a widening pattern.
- if (Vec.getOpcode() == ISD::INSERT_SUBVECTOR && Vec.getOperand(0).isUndef() &&
+ if (Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ Vec.getOperand(0).isUndefOrPoison() &&
Vec.getOperand(1).getValueType().getVectorNumElements() <= IdxVal &&
isNullConstant(Vec.getOperand(2)))
return DAG.getUNDEF(ResultVT);
@@ -4121,7 +4122,7 @@ static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
assert((vectorWidth == 128 || vectorWidth == 256) &&
"Unsupported vector width");
// Inserting UNDEF is a no-op; just return Result.
- if (Vec.isUndef())
+ if (Vec.isUndefOrPoison())
return Result;
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
@@ -4166,7 +4167,7 @@ static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
unsigned NumSrcElts = VecVT.getVectorNumElements();
ArrayRef<SDUse> Hi = Vec->ops().drop_front(NumSrcElts / 2);
if (all_of(Hi, [&](SDValue V) {
- return V.isUndef() || (ZeroNewElements && X86::isZeroNode(V));
+ return V.isUndefOrPoison() || (ZeroNewElements && X86::isZeroNode(V));
}))
Vec = extract128BitVector(Vec, 0, DAG, dl);
}
@@ -4230,7 +4231,7 @@ static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
// insert_subvector(undef, x, lo)
- if (Idx == 0 && Src.isUndef()) {
+ if (Idx == 0 && Src.isUndefOrPoison()) {
Ops.push_back(Sub);
Ops.push_back(DAG.getUNDEF(SubVT));
return true;
@@ -4262,7 +4263,7 @@ static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
return true;
}
// insert_subvector(undef, x, hi)
- if (Src.isUndef()) {
+ if (Src.isUndefOrPoison()) {
Ops.push_back(DAG.getUNDEF(SubVT));
Ops.push_back(Sub);
return true;
@@ -4287,7 +4288,7 @@ static SDValue isUpperSubvectorUndef(SDValue V, const SDLoc &DL,
assert((NumSubOps % 2) == 0 && "Unexpected number of subvectors");
ArrayRef<SDValue> UpperOps(SubOps.begin() + HalfNumSubOps, SubOps.end());
- if (any_of(UpperOps, [](SDValue Op) { return !Op.isUndef(); }))
+ if (any_of(UpperOps, [](SDValue Op) { return !Op.isUndefOrPoison(); }))
return SDValue();
EVT HalfVT = V.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
@@ -4496,10 +4497,10 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
unsigned IdxVal = Op.getConstantOperandVal(2);
// Inserting undef is a nop. We can just return the original vector.
- if (SubVec.isUndef())
+ if (SubVec.isUndefOrPoison())
return Vec;
- if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
+ if (IdxVal == 0 && Vec.isUndefOrPoison()) // the operation is legal
return Op;
MVT OpVT = Op.getSimpleValueType();
@@ -4545,7 +4546,7 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
Undef, SubVec, ZeroIdx);
- if (Vec.isUndef()) {
+ if (Vec.isUndefOrPoison()) {
assert(IdxVal != 0 && "Unexpected index");
SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
DAG.getTargetConstant(IdxVal, dl, MVT::i8));
@@ -4556,7 +4557,7 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
assert(IdxVal != 0 && "Unexpected index");
// If upper elements of Vec are known undef, then just shift into place.
if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
- [](SDValue V) { return V.isUndef(); })) {
+ [](SDValue V) { return V.isUndefOrPoison(); })) {
SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
DAG.getTargetConstant(IdxVal, dl, MVT::i8));
} else {
@@ -4741,15 +4742,17 @@ void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
// Attempt to constant fold, else just create a VECTOR_SHUFFLE.
static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
SDValue V1, SDValue V2, ArrayRef<int> Mask) {
- if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
- (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
+ if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) ||
+ V1.isUndefOrPoison()) &&
+ (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) ||
+ V2.isUndefOrPoison())) {
SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
int M = Mask[I];
if (M < 0)
continue;
SDValue V = (M < NumElts) ? V1 : V2;
- if (V.isUndef())
+ if (V.isUndefOrPoison())
continue;
Ops[I] = V.getOperand(M % NumElts);
}
@@ -5002,7 +5005,7 @@ static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
};
// Handle UNDEFs.
- if (Op.isUndef()) {
+ if (Op.isUndefOrPoison()) {
APInt UndefSrcElts = APInt::getAllOnes(NumElts);
SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
return CastBitData(UndefSrcElts, SrcEltBits);
@@ -5785,7 +5788,7 @@ static void computeZeroableShuffleElements(ArrayRef<int> Mask,
if ((Size % V.getNumOperands()) == 0) {
int Scale = Size / V->getNumOperands();
SDValue Op = V.getOperand(M / Scale);
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
KnownUndef.setBit(i);
if (X86::isZeroNode(Op))
KnownZero.setBit(i);
@@ -5811,7 +5814,7 @@ static void computeZeroableShuffleElements(ArrayRef<int> Mask,
bool AllZero = true;
for (int j = 0; j < Scale; ++j) {
SDValue Op = V.getOperand((M * Scale) + j);
- AllUndef &= Op.isUndef();
+ AllUndef &= Op.isUndefOrPoison();
AllZero &= X86::isZeroNode(Op);
}
if (AllUndef)
@@ -5880,7 +5883,7 @@ static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
M %= Size;
// We are referencing an UNDEF input.
- if (V.isUndef()) {
+ if (V.isUndefOrPoison()) {
KnownUndef.setBit(i);
continue;
}
@@ -5905,7 +5908,7 @@ static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
SDValue Vec = V.getOperand(0);
int NumVecElts = Vec.getValueType().getVectorNumElements();
- if (Vec.isUndef() && Size == NumVecElts) {
+ if (Vec.isUndefOrPoison() && Size == NumVecElts) {
int Idx = V.getConstantOperandVal(2);
int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
if (M < Idx || (Idx + NumSubElts) <= M)
@@ -6118,7 +6121,7 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
InsertIdx *= (MaxElts / NumElts);
ExtractIdx *= (MaxElts / NumSubSrcBCElts);
NumSubElts *= (MaxElts / NumElts);
- bool SrcIsUndef = Src.isUndef();
+ bool SrcIsUndef = Src.isUndefOrPoison();
for (int i = 0; i != (int)MaxElts; ++i)
Mask.push_back(SrcIsUndef ? SM_SentinelUndef : i);
for (int i = 0; i != (int)NumSubElts; ++i)
@@ -6134,7 +6137,7 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
if (Depth > 0 && InsertIdx == NumSubElts && NumElts == (2 * NumSubElts) &&
NumBitsPerElt == 64 && NumSizeInBits == 512 &&
Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
- Src.getOperand(0).isUndef() &&
+ Src.getOperand(0).isUndefOrPoison() &&
Src.getOperand(1).getValueType() == SubVT &&
Src.getConstantOperandVal(2) == 0) {
for (int i = 0; i != (int)NumSubElts; ++i)
@@ -6294,9 +6297,9 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
// lanes), we can treat this as a truncation shuffle.
bool Offset0 = false, Offset1 = false;
if (Opcode == X86ISD::PACKSS) {
- if ((!(N0.isUndef() || EltsLHS.isZero()) &&
+ if ((!(N0.isUndefOrPoison() || EltsLHS.isZero()) &&
DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
- (!(N1.isUndef() || EltsRHS.isZero()) &&
+ (!(N1.isUndefOrPoison() || EltsRHS.isZero()) &&
DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
return false;
// We can't easily fold ASHR into a shuffle, but if it was feeding a
@@ -6314,9 +6317,9 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
}
} else {
APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
- if ((!(N0.isUndef() || EltsLHS.isZero()) &&
+ if ((!(N0.isUndefOrPoison() || EltsLHS.isZero()) &&
!DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
- (!(N1.isUndef() || EltsRHS.isZero()) &&
+ (!(N1.isUndefOrPoison() || EltsRHS.isZero()) &&
!DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
return false;
}
@@ -6506,7 +6509,7 @@ static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
int hi = lo + MaskWidth;
// Strip UNDEF input usage.
- if (Inputs[i].isUndef())
+ if (Inputs[i].isUndefOrPoison())
for (int &M : Mask)
if ((lo <= M) && (M < hi))
M = SM_SentinelUndef;
@@ -6882,8 +6885,8 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, const SDLoc &DL,
std::bitset<4> Zeroable, Undefs;
for (int i = 0; i < 4; ++i) {
SDValue Elt = Op.getOperand(i);
- Undefs[i] = Elt.isUndef();
- Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
+ Undefs[i] = Elt.isUndefOrPoison();
+ Zeroable[i] = (Elt.isUndefOrPoison() || X86::isZeroNode(Elt));
}
assert(Zeroable.size() - Zeroable.count() > 1 &&
"We expect at least two non-zero elements!");
@@ -7140,7 +7143,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
SDValue Elt = peekThroughBitcasts(Elts[i]);
if (!Elt.getNode())
return SDValue();
- if (Elt.isUndef()) {
+ if (Elt.isUndefOrPoison()) {
UndefMask.setBit(i);
continue;
}
@@ -7348,15 +7351,15 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
if (!LoadMask[i])
continue;
SDValue Elt = peekThroughBitcasts(Elts[i]);
- if (RepeatedLoads[i % SubElems].isUndef())
+ if (RepeatedLoads[i % SubElems].isUndefOrPoison())
RepeatedLoads[i % SubElems] = Elt;
else
Match &= (RepeatedLoads[i % SubElems] == Elt);
}
// We must have loads at both ends of the repetition.
- Match &= !RepeatedLoads.front().isUndef();
- Match &= !RepeatedLoads.back().isUndef();
+ Match &= !RepeatedLoads.front().isUndefOrPoison();
+ Match &= !RepeatedLoads.back().isUndefOrPoison();
if (!Match)
continue;
@@ -7871,7 +7874,7 @@ static SDValue LowerBUILD_VECTORvXi1(SDValue Op, const SDLoc &dl,
int SplatIdx = -1;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
- if (In.isUndef())
+ if (In.isUndefOrPoison())
continue;
if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
Immediate |= (InC->getZExtValue() & 0x1) << idx;
@@ -7999,7 +8002,7 @@ static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
SDValue Op = N->getOperand(i + BaseIdx);
// Skip UNDEFs.
- if (Op->isUndef()) {
+ if (Op->isUndefOrPoison()) {
// Update the expected vector extract index.
if (i * 2 == NumElts)
ExpectedVExtractIdx = BaseIdx;
@@ -8029,13 +8032,13 @@ static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
unsigned I1 = Op1.getConstantOperandVal(1);
if (i * 2 < NumElts) {
- if (V0.isUndef()) {
+ if (V0.isUndefOrPoison()) {
V0 = Op0.getOperand(0);
if (V0.getValueType() != VT)
return false;
}
} else {
- if (V1.isUndef()) {
+ if (V1.isUndefOrPoison()) {
V1 = Op0.getOperand(0);
if (V1.getValueType() != VT)
return false;
@@ -8111,16 +8114,16 @@ static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
if (Mode) {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
- if (!isUndefLO && !V0->isUndef())
+ if (!isUndefLO && !V0->isUndefOrPoison())
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
- if (!isUndefHI && !V1->isUndef())
+ if (!isUndefHI && !V1->isUndefOrPoison())
HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
} else {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
- if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
+ if (!isUndefLO && (!V0_LO->isUndefOrPoison() || !V1_LO->isUndefOrPoison()))
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
- if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
+ if (!isUndefHI && (!V0_HI->isUndefOrPoison() || !V1_HI->isUndefOrPoison()))
HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
}
@@ -8188,12 +8191,12 @@ static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
Opc[i % 2] = Opcode;
// Update InVec0 and InVec1.
- if (InVec0.isUndef()) {
+ if (InVec0.isUndefOrPoison()) {
InVec0 = Op0.getOperand(0);
if (InVec0.getSimpleValueType() != VT)
return false;
}
- if (InVec1.isUndef()) {
+ if (InVec1.isUndefOrPoison()) {
InVec1 = Op1.getOperand(0);
if (InVec1.getSimpleValueType() != VT)
return false;
@@ -8222,8 +8225,8 @@ static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
// Ensure we have found an opcode for both parities and that they are
// different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
// inputs are undef.
- if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
- InVec0.isUndef() || InVec1.isUndef())
+ if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] || InVec0.isUndefOrPoison() ||
+ InVec1.isUndefOrPoison())
return false;
IsSubAdd = Opc[0] == ISD::FADD;
@@ -8347,7 +8350,7 @@ static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
// Ignore undef elements.
SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
// If there's an opcode mismatch, we're done.
@@ -8380,10 +8383,10 @@ static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
// The source vector is chosen based on which 64-bit half of the
// destination vector is being calculated.
if (j < NumEltsIn64Bits) {
- if (V0.isUndef())
+ if (V0.isUndefOrPoison())
V0 = Op0.getOperand(0);
} else {
- if (V1.isUndef())
+ if (V1.isUndefOrPoison())
V1 = Op0.getOperand(0);
}
@@ -8437,7 +8440,7 @@ static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
unsigned NumElts = VT.getVectorNumElements();
APInt DemandedElts = APInt::getAllOnes(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
- if (BV->getOperand(i).isUndef())
+ if (BV->getOperand(i).isUndefOrPoison())
DemandedElts.clearBit(i);
// If we don't need the upper xmm, then perform as a xmm hop.
@@ -8459,7 +8462,7 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
SelectionDAG &DAG) {
// We need at least 2 non-undef elements to make this worthwhile by default.
unsigned NumNonUndefs =
- count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
+ count_if(BV->op_values(), [](SDValue V) { return !V.isUndefOrPoison(); });
if (NumNonUndefs < 2)
return SDValue();
@@ -8487,11 +8490,11 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
unsigned NumUndefsLO = 0;
unsigned NumUndefsHI = 0;
for (unsigned i = 0, e = Half; i != e; ++i)
- if (BV->getOperand(i)->isUndef())
+ if (BV->getOperand(i)->isUndefOrPoison())
NumUndefsLO++;
for (unsigned i = Half, e = NumElts; i != e; ++i)
- if (BV->getOperand(i)->isUndef())
+ if (BV->getOperand(i)->isUndefOrPoison())
NumUndefsHI++;
SDValue InVec0, InVec1;
@@ -8503,15 +8506,19 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
if (isHorizontalBinOpPart(BV, ISD::ADD, DL, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOpPart(BV, ISD::ADD, DL, DAG, Half, NumElts, InVec2,
InVec3) &&
- ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
- ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
+ ((InVec0.isUndefOrPoison() || InVec2.isUndefOrPoison()) ||
+ InVec0 == InVec2) &&
+ ((InVec1.isUndefOrPoison() || InVec3.isUndefOrPoison()) ||
+ InVec1 == InVec3))
X86Opcode = X86ISD::HADD;
else if (isHorizontalBinOpPart(BV, ISD::SUB, DL, DAG, 0, Half, InVec0,
InVec1) &&
isHorizontalBinOpPart(BV, ISD::SUB, DL, DAG, Half, NumElts, InVec2,
InVec3) &&
- ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
- ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
+ ((InVec0.isUndefOrPoison() || InVec2.isUndefOrPoison()) ||
+ InVec0 == InVec2) &&
+ ((InVec1.isUndefOrPoison() || InVec3.isUndefOrPoison()) ||
+ InVec1 == InVec3))
X86Opcode = X86ISD::HSUB;
else
CanFold = false;
@@ -8525,9 +8532,10 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
// Convert this build_vector into a pair of horizontal binops followed by
// a concat vector. We must adjust the outputs from the partial horizontal
// matching calls above to account for undefined vector halves.
- SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
- SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
- assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
+ SDValue V0 = InVec0.isUndefOrPoison() ? InVec2 : InVec0;
+ SDValue V1 = InVec1.isUndefOrPoison() ? InVec3 : InVec1;
+ assert((!V0.isUndefOrPoison() || !V1.isUndefOrPoison()) &&
+ "Horizontal-op of undefs?");
bool isUndefLO = NumUndefsLO == Half;
bool isUndefHI = NumUndefsHI == Half;
return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
@@ -9024,7 +9032,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
unsigned NumConstants = NumElems;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = Op.getOperand(i);
- if (Elt.isUndef()) {
+ if (Elt.isUndefOrPoison()) {
UndefMask.setBit(i);
continue;
}
@@ -9142,7 +9150,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
- else if (!Elt.isUndef()) {
+ else if (!Elt.isUndefOrPoison()) {
assert(!VarElt.getNode() && !InsIndex.getNode() &&
"Expected one variable element in this vector");
VarElt = Elt;
@@ -9389,13 +9397,14 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// For SSE 4.1, use insertps to put the high elements into the low element.
if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
SDValue Result;
- if (!Op.getOperand(0).isUndef())
+ if (!Op.getOperand(0).isUndefOrPoison())
Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
else
Result = DAG.getUNDEF(VT);
for (unsigned i = 1; i < NumElems; ++i) {
- if (Op.getOperand(i).isUndef()) continue;
+ if (Op.getOperand(i).isUndefOrPoison())
+ continue;
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
Op.getOperand(i), DAG.getVectorIdxConstant(i, dl));
}
@@ -9407,7 +9416,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// bottom slot of the vector (which generates no code for SSE).
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
- if (!Op.getOperand(i).isUndef())
+ if (!Op.getOperand(i).isUndefOrPoison())
Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
else
Ops[i] = DAG.getUNDEF(VT);
@@ -9450,7 +9459,7 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
unsigned NonZeros = 0;
for (unsigned i = 0; i != NumOperands; ++i) {
SDValue SubVec = Op.getOperand(i);
- if (SubVec.isUndef())
+ if (SubVec.isUndefOrPoison())
continue;
if (ISD::isFreezeUndef(SubVec.getNode())) {
// If the freeze(undef) has multiple uses then we must fold to zero.
@@ -9515,7 +9524,7 @@ static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
uint64_t NonZeros = 0;
for (unsigned i = 0; i != NumOperands; ++i) {
SDValue SubVec = Op.getOperand(i);
- if (SubVec.isUndef())
+ if (SubVec.isUndefOrPoison())
continue;
assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
@@ -10638,13 +10647,15 @@ static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
unsigned NumBits2 = N2.getScalarValueSizeInBits();
bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
- if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
- (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
+ if ((!N1.isUndefOrPoison() && !IsZero1 && NumBits1 != NumSrcBits) ||
+ (!N2.isUndefOrPoison() && !IsZero2 && NumBits2 != NumSrcBits))
return false;
if (Subtarget.hasSSE41() || BitSize == 8) {
APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
- if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
- (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
+ if ((N1.isUndefOrPoison() || IsZero1 ||
+ DAG.MaskedValueIsZero(N1, ZeroMask)) &&
+ (N2.isUndefOrPoison() || IsZero2 ||
+ DAG.MaskedValueIsZero(N2, ZeroMask))) {
V1 = N1;
V2 = N2;
SrcVT = PackVT;
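For the unsigned case checked here, a worked illustration of why zero high bits -- or an undef/poison input, which may be assumed to be anything, including zero -- let the pack act as a plain truncation (values are hypothetical):

    // v4i32 -> v8i16 via PACKUSDW, single 128-bit lane:
    //   N1 = <0x00001234, 0x00005678, 0x00000000, 0x0000FFFF>
    //   N2 = poison
    //   PACKUSDW(N1, N2) = <0x1234, 0x5678, 0x0000, 0xFFFF, u, u, u, u>
    // Unsigned saturation never fires, since every element fits in 16 bits.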
@@ -10654,9 +10665,9 @@ static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
}
bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
- if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
+ if ((N1.isUndefOrPoison() || IsZero1 || IsAllOnes1 ||
DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
- (N2.isUndef() || IsZero2 || IsAllOnes2 ||
+ (N2.isUndefOrPoison() || IsZero2 || IsAllOnes2 ||
DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
V1 = N1;
V2 = N2;
@@ -10823,9 +10834,9 @@ static bool matchShuffleAsBlend(MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable, bool &ForceV1Zero,
bool &ForceV2Zero, uint64_t &BlendMask) {
bool V1IsZeroOrUndef =
- V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
+ V1.isUndefOrPoison() || ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZeroOrUndef =
- V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
+ V2.isUndefOrPoison() || ISD::isBuildVectorAllZeros(V2.getNode());
BlendMask = 0;
ForceV1Zero = false, ForceV2Zero = false;
@@ -11123,9 +11134,9 @@ static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
// Normalize the mask value depending on whether it's V1 or V2.
int NormM = M;
SDValue &Op = Ops[Elt & 1];
- if (M < NumElts && (Op.isUndef() || Op == V1))
+ if (M < NumElts && (Op.isUndefOrPoison() || Op == V1))
Op = V1;
- else if (NumElts <= M && (Op.isUndef() || Op == V2)) {
+ else if (NumElts <= M && (Op.isUndefOrPoison() || Op == V2)) {
Op = V2;
NormM -= NumElts;
} else
@@ -11194,7 +11205,7 @@ static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
assert(Mask.size() >= 2 && "Single element masks are invalid.");
// This routine only supports 128-bit integer dual input vectors.
- if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndef())
+ if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndefOrPoison())
return SDValue();
int NumLoInputs =
@@ -13072,7 +13083,7 @@ static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
Mask, Subtarget, DAG))
@@ -13156,7 +13167,7 @@ static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
Mask, Subtarget, DAG))
@@ -14383,7 +14394,7 @@ static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
SelectionDAG &DAG) {
// Commute binary inputs so V2 is a load to simplify VPERMI2/T2 folds.
SmallVector<int, 32> Mask(OriginalMask);
- if (!V2.isUndef() && isShuffleFoldableLoad(V1) &&
+ if (!V2.isUndefOrPoison() && isShuffleFoldableLoad(V1) &&
!isShuffleFoldableLoad(V2)) {
ShuffleVectorSDNode::commuteMask(Mask);
std::swap(V1, V2);
@@ -14411,7 +14422,7 @@ static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
}
SDValue Result;
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
else
Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);
@@ -14600,7 +14611,7 @@ static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
return V;
// Check for compaction patterns.
- bool IsSingleInput = V2.isUndef();
+ bool IsSingleInput = V2.isUndefOrPoison();
int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
// Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
@@ -14955,7 +14966,8 @@ static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(!V2.isUndef() && "This routine must not be used to lower single-input "
+ assert(!V2.isUndefOrPoison() &&
+ "This routine must not be used to lower single-input "
"shuffles as it could then recurse on itself.");
int Size = Mask.size();
@@ -15047,7 +15059,7 @@ static SDValue lowerShuffleAsLanePermuteAndPermute(
int NumElts = VT.getVectorNumElements();
int NumLanes = VT.getSizeInBits() / 128;
int NumEltsPerLane = NumElts / NumLanes;
- bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();
+ bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndefOrPoison();
/// Attempts to find a sublane permute with the given size
/// that gets all elements into their target lanes.
@@ -15206,7 +15218,7 @@ static SDValue lowerShuffleAsLanePermuteAndShuffle(
}
// TODO - we could support shuffling V2 in the Flipped input.
- assert(V2.isUndef() &&
+ assert(V2.isUndefOrPoison() &&
"This last part of this routine only works on single input shuffles");
SmallVector<int> InLaneMask;
@@ -15236,7 +15248,7 @@ static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// Attempt to match VBROADCAST*128 subvector broadcast load.
bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
@@ -15255,7 +15267,8 @@ static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
return SDValue();
}
- bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
+ bool V2IsZero =
+ !V2.isUndefOrPoison() && ISD::isBuildVectorAllZeros(V2.getNode());
SmallVector<int, 4> WidenedMask;
if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
@@ -15354,7 +15367,7 @@ static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
- assert(!V2.isUndef() && "This is only useful with multiple inputs.");
+ assert(!V2.isUndefOrPoison() && "This is only useful with multiple inputs.");
if (is128BitLaneRepeatedShuffleMask(VT, Mask))
return SDValue();
@@ -15688,7 +15701,7 @@ static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
// canonicalized to undef), then we can use vpermpd. Otherwise, we
// are better off extracting the upper half of 1 operand and using a
// narrow shuffle.
- if (EltWidth == 64 && V2.isUndef())
+ if (EltWidth == 64 && V2.isUndefOrPoison())
return SDValue();
// If this is an unary vXi8 shuffle with inplace halves, then perform as
// full width pshufb, and then merge.
@@ -15917,7 +15930,7 @@ static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
MinSubLaneScale = 2;
MaxSubLaneScale =
- (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
+ (!OnlyLowestElts && V2.isUndefOrPoison() && VT == MVT::v32i8) ? 4 : 2;
}
if (Subtarget.hasBWI() && VT == MVT::v64i8)
MinSubLaneScale = MaxSubLaneScale = 4;
@@ -16135,7 +16148,7 @@ static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
Subtarget, DAG))
return V;
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
Mask, Subtarget, DAG))
@@ -16272,7 +16285,7 @@ static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
Subtarget, DAG, /*BitwiseOnly*/ true))
return Shift;
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// When the shuffle is mirrored between the 128-bit lanes of the unit, we
// can use lower latency instructions that will operate on both lanes.
SmallVector<int, 2> RepeatedMask;
@@ -16399,7 +16412,7 @@ static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
@@ -16420,7 +16433,7 @@ static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// If we have a single input shuffle with different shuffle patterns in the
// two 128-bit lanes use the variable mask to VPERMILPS.
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
@@ -16506,7 +16519,7 @@ static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// For non-AVX512 if the Mask is of 16bit elements in lane then try to split
// since after split we get a more efficient code than vblend by using
// vpunpcklwd and vpunpckhwd instrs.
- if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
+ if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndefOrPoison() &&
!Subtarget.hasAVX512())
return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Zeroable,
Subtarget, DAG);
@@ -16540,7 +16553,7 @@ static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
if (Is128BitLaneRepeatedShuffle) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
@@ -16582,7 +16595,7 @@ static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return V;
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// Try to produce a fixed cross-128-bit lane permute followed by unpack
// because that should be faster than the variable permute alternatives.
if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, V1, V2, Mask, DAG))
@@ -16676,7 +16689,7 @@ static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return V;
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// Try to use bit rotation instructions.
if (SDValue Rotate =
lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
@@ -16794,7 +16807,7 @@ static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
return Rotate;
// Try to use bit rotation instructions.
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
if (SDValue Rotate =
lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
return Rotate;
@@ -16807,7 +16820,8 @@ static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// There are no generalized cross-lane shuffle operations available on i8
// element types.
- if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
+ if (V2.isUndefOrPoison() &&
+ is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
// Try to produce a fixed cross-128-bit lane permute followed by unpack
// because that should be faster than the variable permute alternatives.
if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, V1, V2, Mask, DAG))
@@ -17030,7 +17044,7 @@ static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
unsigned OpIndex = i / 2;
- if (Ops[OpIndex].isUndef())
+ if (Ops[OpIndex].isUndefOrPoison())
Ops[OpIndex] = Op;
else if (Ops[OpIndex] != Op)
return SDValue();
@@ -17051,7 +17065,7 @@ static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// Use low duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
@@ -17117,7 +17131,7 @@ static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
@@ -17149,7 +17163,7 @@ static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// If we have a single input shuffle with different shuffle patterns in the
// 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
- if (V2.isUndef() &&
+ if (V2.isUndefOrPoison() &&
!is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
@@ -17179,7 +17193,7 @@ static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
Subtarget, DAG, /*BitwiseOnly*/ true))
return Shift;
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// When the shuffle is mirrored between the 128-bit lanes of the unit, we
// can use lower latency instructions that will operate on all four
// 128-bit lanes.
@@ -17274,7 +17288,7 @@ static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
if (Is128BitLaneRepeatedShuffle) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
@@ -17370,7 +17384,7 @@ static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
Subtarget, DAG))
return Rotate;
- if (V2.isUndef()) {
+ if (V2.isUndefOrPoison()) {
// Try to use bit rotation instructions.
if (SDValue Rotate =
lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
@@ -17396,7 +17410,7 @@ static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
- if (!V2.isUndef())
+ if (!V2.isUndefOrPoison())
if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG))
return Result;
@@ -17442,7 +17456,7 @@ static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
return Rotate;
// Try to use bit rotation instructions.
- if (V2.isUndef())
+ if (V2.isUndefOrPoison())
if (SDValue Rotate =
lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
return Rotate;
@@ -17486,7 +17500,7 @@ static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
- if (!V2.isUndef())
+ if (!V2.isUndefOrPoison())
if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
return Result;
@@ -17582,7 +17596,7 @@ static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// Shuffle should be unary.
- if (!V2.isUndef())
+ if (!V2.isUndefOrPoison())
return SDValue();
int ShiftAmt = -1;
@@ -17934,8 +17948,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
"Can't lower MMX shuffles");
- bool V1IsUndef = V1.isUndef();
- bool V2IsUndef = V2.isUndef();
+ bool V1IsUndef = V1.isUndefOrPoison();
+ bool V2IsUndef = V2.isUndefOrPoison();
if (V1IsUndef && V2IsUndef)
return DAG.getUNDEF(VT);
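A sketch of the kind of DAG this early-out now folds, using the getPoison() accessor introduced earlier in the patch (illustrative code, not part of the patch itself):

    SDValue P = DAG.getPoison(MVT::v4i32);   // ISD::POISON
    SDValue U = DAG.getUNDEF(MVT::v4i32);    // ISD::UNDEF
    // For any shuffle of (P, U), V1IsUndef && V2IsUndef now holds, so
    // lowering immediately returns DAG.getUNDEF(MVT::v4i32).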
@@ -18094,10 +18108,11 @@ static SDValue lowerVECTOR_COMPRESS(SDValue Op, const X86Subtarget &Subtarget,
DAG, DL);
Mask = widenSubVector(LargeMaskVT, Mask, /*ZeroNewElements=*/true,
Subtarget, DAG, DL);
- Passthru = Passthru.isUndef() ? DAG.getUNDEF(LargeVecVT)
- : widenSubVector(LargeVecVT, Passthru,
- /*ZeroNewElements=*/false,
- Subtarget, DAG, DL);
+ Passthru =
+ Passthru.isUndefOrPoison()
+ ? DAG.getUNDEF(LargeVecVT)
+ : widenSubVector(LargeVecVT, Passthru,
+ /*ZeroNewElements=*/false, Subtarget, DAG, DL);
SDValue Compressed =
DAG.getNode(ISD::VECTOR_COMPRESS, DL, LargeVecVT, Vec, Mask, Passthru);
@@ -18111,7 +18126,7 @@ static SDValue lowerVECTOR_COMPRESS(SDValue Op, const X86Subtarget &Subtarget,
EVT LargeVecVT = MVT::getVectorVT(LageElementVT, NumElements);
Vec = DAG.getNode(ISD::ANY_EXTEND, DL, LargeVecVT, Vec);
- Passthru = Passthru.isUndef()
+ Passthru = Passthru.isUndefOrPoison()
? DAG.getUNDEF(LargeVecVT)
: DAG.getNode(ISD::ANY_EXTEND, DL, LargeVecVT, Passthru);
@@ -20735,7 +20750,7 @@ static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
std::tie(Lo, Hi) = splitVector(In, DAG, DL);
// If Hi is undef, then don't bother packing it and widen the result instead.
- if (Hi.isUndef()) {
+ if (Hi.isUndefOrPoison()) {
EVT DstHalfVT = DstVT.getHalfNumVectorElementsVT(Ctx);
if (SDValue Res =
truncateVectorWithPACK(Opcode, DstHalfVT, Lo, DL, DAG, Subtarget))
@@ -25872,7 +25887,7 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
- if (PreservedSrc.isUndef())
+ if (PreservedSrc.isUndefOrPoison())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
}
@@ -25905,7 +25920,7 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
Op.getOpcode() == X86ISD::VFPCLASSS)
return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
- if (PreservedSrc.isUndef())
+ if (PreservedSrc.isUndefOrPoison())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
}
@@ -26536,7 +26551,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return Op.getOperand(1);
// Avoid false dependency.
- if (PassThru.isUndef())
+ if (PassThru.isUndefOrPoison())
PassThru = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
@@ -26685,7 +26700,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
// Break false dependency.
- if (PassThru.isUndef())
+ if (PassThru.isUndefOrPoison())
PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
@@ -27045,7 +27060,7 @@ static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
// If source is undef or we know it won't be used, use a zero vector
// to break register dependency.
// TODO: use undef instead and let BreakFalseDeps deal with it?
- if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
+ if (Src.isUndefOrPoison() || ISD::isBuildVectorAllOnes(Mask.getNode()))
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
// Cast mask to an integer type.
@@ -27086,7 +27101,7 @@ static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
// If source is undef or we know it won't be used, use a zero vector
// to break register dependency.
// TODO: use undef instead and let BreakFalseDeps deal with it?
- if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
+ if (Src.isUndefOrPoison() || ISD::isBuildVectorAllOnes(Mask.getNode()))
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
@@ -28985,7 +29000,7 @@ static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
if (Op->getOpcode() == ISD::BUILD_VECTOR ||
Op->getOpcode() == ISD::SPLAT_VECTOR) {
for (const SDValue &OpVal : Op->op_values()) {
- if (OpVal.isUndef())
+ if (OpVal.isUndefOrPoison())
continue;
auto *CstOp = dyn_cast<ConstantFPSDNode>(OpVal);
if (!CstOp)
@@ -30175,7 +30190,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
if (ConstantAmt) {
for (unsigned I = 0; I != NumElts; ++I) {
SDValue A = Amt.getOperand(I);
- if (A.isUndef() || A->getAsAPIntVal().uge(EltSizeInBits))
+ if (A.isUndefOrPoison() || A->getAsAPIntVal().uge(EltSizeInBits))
continue;
unsigned CstAmt = A->getAsAPIntVal().getZExtValue();
if (UniqueCstAmt.count(CstAmt)) {
@@ -30252,19 +30267,20 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
for (unsigned SrcI = 0, E = AmtWideElts.size(); SrcI != E; SrcI += 2) {
unsigned DstI = SrcI / 2;
// Both elements are undef? Make a note and keep going.
- if (AmtWideElts[SrcI].isUndef() && AmtWideElts[SrcI + 1].isUndef()) {
+ if (AmtWideElts[SrcI].isUndefOrPoison() &&
+ AmtWideElts[SrcI + 1].isUndefOrPoison()) {
TmpAmtWideElts[DstI] = AmtWideElts[SrcI];
continue;
}
// Even element is undef? We will shift it by the same shift amount as
// the odd element.
- if (AmtWideElts[SrcI].isUndef()) {
+ if (AmtWideElts[SrcI].isUndefOrPoison()) {
TmpAmtWideElts[DstI] = AmtWideElts[SrcI + 1];
continue;
}
// Odd element is undef? We will shift it by the same shift amount as
// the even element.
- if (AmtWideElts[SrcI + 1].isUndef()) {
+ if (AmtWideElts[SrcI + 1].isUndefOrPoison()) {
TmpAmtWideElts[DstI] = AmtWideElts[SrcI];
continue;
}
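A worked example of this pairing loop's undef/poison handling (illustrative shift amounts):

    // v8i16 amounts <3, 3, u, 5, 7, u, u, u>  (u = undef/poison)
    // pair into v4i32 lanes as:
    //   (3,3) -> 3   both defined and equal
    //   (u,5) -> 5   even element undef, inherit the odd amount
    //   (7,u) -> 7   odd element undef, inherit the even amount
    //   (u,u) -> u   both undef, stays undef
    // leaving a v4i32 shift by <3, 5, 7, u>.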
@@ -32755,7 +32771,7 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
if (InVT == NVT)
return InOp;
- if (InOp.isUndef())
+ if (InOp.isUndefOrPoison())
return DAG.getUNDEF(NVT);
assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
@@ -32770,7 +32786,7 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
if (InOp.getOpcode() == ISD::CONCAT_VECTORS && InOp.getNumOperands() == 2) {
SDValue N1 = InOp.getOperand(1);
if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
- N1.isUndef()) {
+ N1.isUndefOrPoison()) {
InOp = InOp.getOperand(0);
InVT = InOp.getSimpleValueType();
InNumElts = InVT.getVectorNumElements();
@@ -32868,7 +32884,8 @@ static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
// Handle AVX masked loads which don't support passthru other than 0.
if (MaskVT.getVectorElementType() != MVT::i1) {
// We also allow undef in the isel pattern.
- if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
+ if (PassThru.isUndefOrPoison() ||
+ ISD::isBuildVectorAllZeros(PassThru.getNode()))
return Op;
SDValue NewLoad = DAG.getMaskedLoad(
@@ -33002,7 +33019,7 @@ static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
}
// Break dependency on the data register.
- if (PassThru.isUndef())
+ if (PassThru.isUndefOrPoison())
PassThru = getZeroVector(VT, Subtarget, DAG, dl);
SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
@@ -33175,8 +33192,9 @@ SDValue X86TargetLowering::visitMaskedLoad(
EVT VTy = PassThru.getValueType();
EVT Ty = VTy.getVectorElementType();
SDVTList Tys = DAG.getVTList(Ty, MVT::Other);
- auto ScalarPassThru = PassThru.isUndef() ? DAG.getConstant(0, DL, Ty)
- : DAG.getBitcast(Ty, PassThru);
+ auto ScalarPassThru = PassThru.isUndefOrPoison()
+ ? DAG.getConstant(0, DL, Ty)
+ : DAG.getBitcast(Ty, PassThru);
auto Flags = getFlagsOfCmpZeroFori1(DAG, DL, Mask);
auto COND_NE = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
SDValue Ops[] = {Chain, Ptr, ScalarPassThru, COND_NE, Flags};
@@ -38028,7 +38046,7 @@ X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
return false;
for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
- if (!DemandedElts[i] || V.getOperand(i).isUndef())
+ if (!DemandedElts[i] || V.getOperand(i).isUndefOrPoison())
continue;
const APInt &Val = V.getConstantOperandAPInt(i);
if (Val.getBitWidth() > Val.getNumSignBits() &&
@@ -39649,7 +39667,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
unsigned OpIndex = i / 2;
- if (Ops[OpIndex].isUndef())
+ if (Ops[OpIndex].isUndefOrPoison())
Ops[OpIndex] = Op;
else if (Ops[OpIndex] != Op)
return SDValue();
@@ -40300,7 +40318,7 @@ static SDValue combineX86ShuffleChainWithExtract(
continue;
}
if (Input.getOpcode() == ISD::INSERT_SUBVECTOR &&
- Input.getOperand(0).isUndef()) {
+ Input.getOperand(0).isUndefOrPoison()) {
Input = peekThroughBitcasts(Input.getOperand(1));
continue;
}
@@ -40353,7 +40371,7 @@ static SDValue combineX86ShuffleChainWithExtract(
}
// TODO: Handle insertions into upper subvectors.
if (Input.getOpcode() == ISD::INSERT_SUBVECTOR &&
- Input.getOperand(0).isUndef() &&
+ Input.getOperand(0).isUndefOrPoison() &&
isNullConstant(Input.getOperand(2))) {
Input = peekThroughBitcasts(Input.getOperand(1));
continue;
@@ -40988,7 +41006,8 @@ static SDValue combineX86ShufflesRecursively(
// TODO: Can resolveTargetShuffleInputsAndMask do some of this?
for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
SDValue &Op = Ops[I];
- if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
+ if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ Op.getOperand(0).isUndefOrPoison() &&
isNullConstant(Op.getOperand(2))) {
Op = Op.getOperand(1);
unsigned Scale = RootSizeInBits / Op.getValueSizeInBits();
@@ -41727,13 +41746,13 @@ static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
EVT SrcVT0 = Src0.getValueType();
EVT SrcVT1 = Src1.getValueType();
- if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
+ if (!Src1.isUndefOrPoison() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
return SDValue();
switch (SrcOpc0) {
case X86ISD::MOVDDUP: {
SDValue LHS = Src0.getOperand(0);
- SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
+ SDValue RHS = Src1.isUndefOrPoison() ? Src1 : Src1.getOperand(0);
SDValue Res =
DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
@@ -41751,9 +41770,9 @@ static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
case X86ISD::VSRLI:
case X86ISD::VSRAI:
case X86ISD::PSHUFD:
- if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
+ if (Src1.isUndefOrPoison() || Src0.getOperand(1) == Src1.getOperand(1)) {
SDValue LHS = Src0.getOperand(0);
- SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
+ SDValue RHS = Src1.isUndefOrPoison() ? Src1 : Src1.getOperand(0);
SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
V.getOperand(2));
Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
@@ -42101,7 +42120,8 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
SDValue V = peekThroughOneUseBitcasts(N0);
- if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
+ if (V.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ V.getOperand(0).isUndefOrPoison() &&
isNullConstant(V.getOperand(2))) {
SDValue In = V.getOperand(1);
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
@@ -42258,9 +42278,9 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
if (LHS.getOpcode() == ISD::BITCAST &&
- (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
+ (RHS.getOpcode() == ISD::BITCAST || RHS.isUndefOrPoison())) {
EVT SrcVT = LHS.getOperand(0).getValueType();
- if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
+ if (RHS.isUndefOrPoison() || SrcVT == RHS.getOperand(0).getValueType()) {
return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
DAG.getBitcast(SrcVT, LHS),
DAG.getBitcast(SrcVT, RHS),
@@ -42373,12 +42393,12 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
unsigned ZeroMask = InsertPSMask & 0xF;
// If we zero out all elements from Op0 then we don't need to reference it.
- if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
+ if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndefOrPoison())
return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
// If we zero out the element from Op1 then we don't need to reference it.
- if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
+ if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndefOrPoison())
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
@@ -42816,8 +42836,8 @@ static SDValue combineShuffleOfConcatUndef(SDNode *N, const SDLoc &DL,
// Check that both sources are concats with undef.
if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
- N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
- !N1.getOperand(1).isUndef())
+ N1.getNumOperands() != 2 || !N0.getOperand(1).isUndefOrPoison() ||
+ !N1.getOperand(1).isUndefOrPoison())
return SDValue();
// Construct the new shuffle mask. Elements from the first source retain their
@@ -44711,7 +44731,7 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
SDValue LowerOp = SubSrcOps[0];
ArrayRef<SDValue> UpperOps(std::next(SubSrcOps.begin()), SubSrcOps.end());
if (LowerOp.getOpcode() == ISD::SETCC &&
- all_of(UpperOps, [](SDValue Op) { return Op.isUndef(); })) {
+ all_of(UpperOps, [](SDValue Op) { return Op.isUndefOrPoison(); })) {
EVT SubVT = VT.getIntegerVT(
*DAG.getContext(), LowerOp.getValueType().getVectorMinNumElements());
if (SDValue V = combineBitcastvxi1(DAG, SubVT, LowerOp, DL, Subtarget)) {
@@ -44819,7 +44839,7 @@ static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
APInt Imm(SrcVT.getVectorNumElements(), 0);
for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Op.getOperand(Idx);
- if (!In.isUndef() && (In->getAsZExtVal() & 0x1))
+ if (!In.isUndefOrPoison() && (In->getAsZExtVal() & 0x1))
Imm.setBit(Idx);
}
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
@@ -44890,7 +44910,7 @@ static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
// Build MMX element from integer GPR or SSE float values.
auto CreateMMXElement = [&](SDValue V) {
- if (V.isUndef())
+ if (V.isUndefOrPoison())
return DAG.getUNDEF(MVT::x86mmx);
if (V.getValueType().isFloatingPoint()) {
if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
@@ -44912,7 +44932,7 @@ static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
// Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
if (Splat) {
- if (Splat.isUndef())
+ if (Splat.isUndefOrPoison())
return DAG.getUNDEF(MVT::x86mmx);
Splat = CreateMMXElement(Splat);
@@ -45201,7 +45221,7 @@ static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
bool LowUndef = true, AllUndefOrZero = true;
for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
SDValue Op = N0.getOperand(i);
- LowUndef &= Op.isUndef() || (i >= e/2);
+ LowUndef &= Op.isUndefOrPoison() || (i >= e / 2);
AllUndefOrZero &= isNullConstantOrUndef(Op);
}
if (AllUndefOrZero) {
@@ -48359,7 +48379,7 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
// PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
- if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
+ if (IsAnyOf && CmpBits == 8 && VecOp1.isUndefOrPoison()) {
SDLoc DL(EFLAGS);
SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
@@ -49708,8 +49728,8 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
// Constant Folding.
APInt UndefElts0, UndefElts1;
SmallVector<APInt, 32> EltBits0, EltBits1;
- if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
- (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
+ if ((N0.isUndefOrPoison() || N->isOnlyUserOf(N0.getNode())) &&
+ (N1.isUndefOrPoison() || N->isOnlyUserOf(N1.getNode())) &&
getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0,
/*AllowWholeUndefs*/ true,
/*AllowPartialUndefs*/ true) &&
@@ -49766,10 +49786,10 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
// Try to fold PACKSS(NOT(X),NOT(Y)) -> NOT(PACKSS(X,Y)).
// Currently limit this to allsignbits cases only.
if (IsSigned &&
- (N0.isUndef() || DAG.ComputeNumSignBits(N0) == SrcBitsPerElt) &&
- (N1.isUndef() || DAG.ComputeNumSignBits(N1) == SrcBitsPerElt)) {
- SDValue Not0 = N0.isUndef() ? N0 : IsNOT(N0, DAG);
- SDValue Not1 = N1.isUndef() ? N1 : IsNOT(N1, DAG);
+ (N0.isUndefOrPoison() || DAG.ComputeNumSignBits(N0) == SrcBitsPerElt) &&
+ (N1.isUndefOrPoison() || DAG.ComputeNumSignBits(N1) == SrcBitsPerElt)) {
+ SDValue Not0 = N0.isUndefOrPoison() ? N0 : IsNOT(N0, DAG);
+ SDValue Not1 = N1.isUndefOrPoison() ? N1 : IsNOT(N1, DAG);
if (Not0 && Not1) {
SDLoc DL(N);
MVT SrcVT = N0.getSimpleValueType();
@@ -49782,8 +49802,8 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
// Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
// truncate to create a larger truncate.
- if (Subtarget.hasAVX512() &&
- N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
+ if (Subtarget.hasAVX512() && N0.getOpcode() == ISD::TRUNCATE &&
+ N1.isUndefOrPoison() && VT == MVT::v16i8 &&
N0.getOperand(0).getValueType() == MVT::v8i32) {
if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
(!IsSigned &&
@@ -49813,7 +49833,7 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
Src1 = N1.getOperand(0);
}
- if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
+ if ((Src0 || N0.isUndefOrPoison()) && (Src1 || N1.isUndefOrPoison())) {
assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
@@ -49823,7 +49843,7 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
// Try again with pack(*_extend_vector_inreg, undef).
unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
: ISD::ZERO_EXTEND_VECTOR_INREG;
- if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
+ if (N0.getOpcode() == VecInRegOpc && N1.isUndefOrPoison() &&
N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
DAG);
@@ -49858,12 +49878,12 @@ static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
SDValue LHS1 = LHS.getOperand(1);
SDValue RHS0 = RHS.getOperand(0);
SDValue RHS1 = RHS.getOperand(1);
- if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
- (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
+ if ((LHS0 == LHS1 || LHS0.isUndefOrPoison() || LHS1.isUndefOrPoison()) &&
+ (RHS0 == RHS1 || RHS0.isUndefOrPoison() || RHS1.isUndefOrPoison())) {
SDLoc DL(N);
SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
- LHS0.isUndef() ? LHS1 : LHS0,
- RHS0.isUndef() ? RHS1 : RHS0);
+ LHS0.isUndefOrPoison() ? LHS1 : LHS0,
+ RHS0.isUndefOrPoison() ? RHS1 : RHS0);
MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
Res = DAG.getBitcast(ShufVT, Res);
SDValue NewLHS =
@@ -49935,7 +49955,7 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
// (shift undef, X) -> 0
- if (N0.isUndef())
+ if (N0.isUndefOrPoison())
return DAG.getConstant(0, SDLoc(N), VT);
// Out of range logical bit shifts are guaranteed to be zero.
@@ -50086,7 +50106,8 @@ static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
SDValue Idx = N->getOperand(2);
// Fold insert_vector_elt(undef, elt, 0) --> scalar_to_vector(elt).
- if (Opcode == ISD::INSERT_VECTOR_ELT && Vec.isUndef() && isNullConstant(Idx))
+ if (Opcode == ISD::INSERT_VECTOR_ELT && Vec.isUndefOrPoison() &&
+ isNullConstant(Idx))
return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Scl);
if (Opcode == X86ISD::PINSRB || Opcode == X86ISD::PINSRW) {
@@ -50266,12 +50287,12 @@ static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
// end-users are ISD::AND including cases
// (and(extract_vector_element(SVN), Y)).
if (!SVN || !SVN->hasOneUse() || !SVN->isSplat() ||
- !SVN->getOperand(1).isUndef()) {
+ !SVN->getOperand(1).isUndefOrPoison()) {
return SDValue();
}
SDValue IVEN = SVN->getOperand(0);
if (IVEN.getOpcode() != ISD::INSERT_VECTOR_ELT ||
- !IVEN.getOperand(0).isUndef() || !IVEN.hasOneUse())
+ !IVEN.getOperand(0).isUndefOrPoison() || !IVEN.hasOneUse())
return SDValue();
if (!isa<ConstantSDNode>(IVEN.getOperand(2)) ||
IVEN.getConstantOperandAPInt(2) != SVN->getSplatIndex())
@@ -52545,7 +52566,7 @@ static int getOneTrueElt(SDValue V) {
unsigned NumElts = BV->getValueType(0).getVectorNumElements();
for (unsigned i = 0; i < NumElts; ++i) {
const SDValue &Op = BV->getOperand(i);
- if (Op.isUndef())
+ if (Op.isUndefOrPoison())
continue;
auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
if (!ConstNode)
@@ -52665,7 +52686,7 @@ combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
// Don't try this if the pass-through operand is already undefined. That would
// cause an infinite loop because that's what we're about to create.
- if (ML->getPassThru().isUndef())
+ if (ML->getPassThru().isUndefOrPoison())
return SDValue();
if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
@@ -53185,7 +53206,7 @@ static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
SmallVectorImpl<int> &PostShuffleMask,
bool ForceHorizOp) {
// If either operand is undef, bail out. The binop should be simplified.
- if (LHS.isUndef() || RHS.isUndef())
+ if (LHS.isUndefOrPoison() || RHS.isUndefOrPoison())
return false;
// Look for the following pattern:
@@ -53972,7 +53993,7 @@ static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
case ISD::VECTOR_SHUFFLE: {
// For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
// of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
- if (!Op.getOperand(1).isUndef())
+ if (!Op.getOperand(1).isUndefOrPoison())
return SDValue();
if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
@@ -53985,7 +54006,7 @@ static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
// -V, INDEX).
SDValue InsVector = Op.getOperand(0);
SDValue InsVal = Op.getOperand(1);
- if (!InsVector.isUndef())
+ if (!InsVector.isUndefOrPoison())
return SDValue();
if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
@@ -54362,7 +54383,8 @@ static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
// Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
VT.getVectorElementType() == MVT::i1 &&
- N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
+ N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ N0.getOperand(0).isUndefOrPoison() &&
TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
return DAG.getNode(
ISD::INSERT_SUBVECTOR, DL, VT, N0.getOperand(0),
@@ -54736,7 +54758,7 @@ static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
// ANDNP(undef, x) -> 0
// ANDNP(x, undef) -> 0
- if (N0.isUndef() || N1.isUndef())
+ if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
return DAG.getConstant(0, DL, VT);
// ANDNP(0, x) -> x
@@ -55276,7 +55298,7 @@ static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {
if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
Ops.push_back(DAG.getConstantFP(-Cst->getValueAPF(), SDLoc(Op), EltVT));
} else {
- assert(Op.isUndef());
+ assert(Op.isUndefOrPoison());
Ops.push_back(DAG.getUNDEF(EltVT));
}
}
@@ -55472,8 +55494,8 @@ static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
SDValue N01 = N0.getOperand(1);
unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
- if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
- (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
+ if ((N00.isUndefOrPoison() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
+ (N01.isUndefOrPoison() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
return concatSubVectors(N00, N01, DAG, dl);
}
}
@@ -57408,7 +57430,7 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
unsigned EltSizeInBits = VT.getScalarSizeInBits();
- if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
+ if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndefOrPoison(); }))
return DAG.getUNDEF(VT);
if (llvm::all_of(Ops, [](SDValue Op) {
@@ -58175,12 +58197,13 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
uint64_t IdxVal = N->getConstantOperandVal(2);
MVT SubVecVT = SubVec.getSimpleValueType();
- if (Vec.isUndef() && SubVec.isUndef())
+ if (Vec.isUndefOrPoison() && SubVec.isUndefOrPoison())
return DAG.getUNDEF(OpVT);
// Inserting undefs/zeros into zeros/undefs is a zero vector.
- if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
- (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
+ if ((Vec.isUndefOrPoison() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
+ (SubVec.isUndefOrPoison() ||
+ ISD::isBuildVectorAllZeros(SubVec.getNode())))
return getZeroVector(OpVT, Subtarget, DAG, dl);
if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
@@ -58223,7 +58246,8 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
// TODO: This is a more general version of a DAGCombiner fold, can we move it
// there?
if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
- SubVec.getOperand(0).isUndef() && isNullConstant(SubVec.getOperand(2)))
+ SubVec.getOperand(0).isUndefOrPoison() &&
+ isNullConstant(SubVec.getOperand(2)))
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec,
SubVec.getOperand(1), N->getOperand(2));
@@ -58232,7 +58256,7 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
SubVec.getOperand(0).getSimpleValueType() == OpVT &&
(IdxVal != 0 ||
- !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
+ !(Vec.isUndefOrPoison() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
int ExtIdxVal = SubVec.getConstantOperandVal(1);
if (ExtIdxVal != 0) {
int VecNumElts = OpVT.getVectorNumElements();
@@ -58278,12 +58302,13 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
}
// If this is a broadcast insert into an upper undef, use a larger broadcast.
- if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
+ if (Vec.isUndefOrPoison() && IdxVal != 0 &&
+ SubVec.getOpcode() == X86ISD::VBROADCAST)
return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
// If this is a broadcast load inserted into an upper undef, use a larger
// broadcast load.
- if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
+ if (Vec.isUndefOrPoison() && IdxVal != 0 && SubVec.hasOneUse() &&
SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
@@ -58505,7 +58530,7 @@ static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
return true;
if (ISD::isBuildVectorOfConstantFPSDNodes(V.getNode()))
return true;
- return V.isUndef();
+ return V.isUndefOrPoison();
};
// If we're extracting the lowest subvector and we're the only user,