[llvm] Introducing a new ISD::POISON SDNode to represent the poison value in the IR. (PR #125883)

zhijian lin via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 7 06:48:39 PST 2025


https://github.com/diggerlin updated https://github.com/llvm/llvm-project/pull/125883

From 96eb0d325ffac36bfb08f6032560f4b3e388de50 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Wed, 5 Feb 2025 17:04:32 +0000
Subject: [PATCH 1/2] introduce a new ISD node POISON

---
 llvm/include/llvm/CodeGen/ISDOpcodes.h        |  3 ++
 llvm/include/llvm/CodeGen/SelectionDAG.h      |  7 +++--
 llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 13 ++++++--
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 13 ++++----
 llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 17 ++++++++++
 .../SelectionDAG/LegalizeFloatTypes.cpp       |  4 +++
 .../SelectionDAG/LegalizeIntegerTypes.cpp     |  2 ++
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  3 ++
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 31 ++++++++++++-------
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  2 +-
 .../SelectionDAG/SelectionDAGDumper.cpp       |  1 +
 .../CodeGen/SelectionDAG/SelectionDAGISel.cpp |  1 +
 12 files changed, 75 insertions(+), 22 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 604dc9419025b09..cdb0d4b92bbf709 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -217,6 +217,9 @@ enum NodeType {
   /// UNDEF - An undefined node.
   UNDEF,
 
+  /// POISON - A node that represents the poison value in the IR.
+  POISON,
+
   /// FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
   /// is evaluated to UNDEF), or returns VAL otherwise. Note that each
  /// read of UNDEF can yield a different value, but FREEZE(UNDEF) cannot.
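
As a concrete illustration (a hand-written sketch, not code from the patch;
DAG, MVT and the getNode/getFreeze calls are the usual SelectionDAG APIs):

    // POISON models the IR 'poison' constant. It is more restrictive than
    // UNDEF: poison propagates through most operations, while each read of
    // UNDEF may independently yield any value of the type.
    SDValue P = DAG.getNode(ISD::POISON, SDLoc(), MVT::i32); // poison:i32
    SDValue U = DAG.getNode(ISD::UNDEF, SDLoc(), MVT::i32);  // undef:i32
    // FREEZE pins either one to some arbitrary-but-fixed i32 value.
    SDValue F = DAG.getFreeze(P);
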
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index b31ad11c3ee0ee8..04d72eca9a35be2 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -871,7 +871,7 @@ class SelectionDAG {
   /// for integers, a type wider than) VT's element type.
   SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
     // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
-    if (Op.getOpcode() == ISD::UNDEF) {
+    if (Op.isUndef()) {
       assert((VT.getVectorElementType() == Op.getValueType() ||
               (VT.isInteger() &&
                VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
@@ -887,7 +887,7 @@ class SelectionDAG {
   // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
   // elements.
   SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
-    if (Op.getOpcode() == ISD::UNDEF) {
+    if (Op.isUndef()) {
       assert((VT.getVectorElementType() == Op.getValueType() ||
               (VT.isInteger() &&
                VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
@@ -1128,6 +1128,9 @@ class SelectionDAG {
     return getNode(ISD::UNDEF, SDLoc(), VT);
   }
 
+  /// Return a POISON node. POISON does not have a useful SDLoc.
+  SDValue getPoison(EVT VT) { return getNode(ISD::POISON, SDLoc(), VT); }
+
   /// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
   SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm,
                     bool ConstantFold = true);
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 03899493847b394..c98c9da43a30804 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -690,8 +690,17 @@ END_TWO_BYTE_PACK()
   /// \<target\>ISD namespace).
   bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
 
-  /// Return true if the type of the node type undefined.
-  bool isUndef() const { return NodeType == ISD::UNDEF; }
+  /// Returns true if the node is UNDEF; when UndefOnly is false, POISON is
+  /// also treated as undef.
+  /// @param UndefOnly When true, match only UNDEF; when false, match both
+  /// UNDEF and POISON.
+  bool isUndef(bool UndefOnly = false) const {
+    return NodeType == ISD::UNDEF || (!UndefOnly && NodeType == ISD::POISON);
+  }
+
+  /// Return true if the node is a POISON node.
+  bool isPoison() const { return NodeType == ISD::POISON; }
 
   /// Test if this node is a memory intrinsic (with valid pointer information).
   bool isMemIntrinsic() const { return SDNodeBits.IsMemIntrinsic; }
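
A quick sketch of the predicate semantics in this first revision (assumed
usage, not code from the patch):

    SDValue U = DAG.getUNDEF(MVT::i64);
    SDValue P = DAG.getPoison(MVT::i64);   // helper added above
    U->isUndef();                          // true
    P->isUndef();                          // true, POISON counts by default
    P->isUndef(/*UndefOnly=*/true);        // false
    P->isPoison();                         // true
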
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 02b79c67af3ee07..ce776214df1f1e4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16156,7 +16156,7 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
     // also recursively replace t184 by t150.
     SDValue MaybePoisonOperand = N->getOperand(0).getOperand(OpNo);
     // Don't replace every single UNDEF everywhere with frozen UNDEF, though.
-    if (MaybePoisonOperand.getOpcode() == ISD::UNDEF)
+    if (MaybePoisonOperand.isUndef())
       continue;
     // First, freeze each offending operand.
     SDValue FrozenMaybePoisonOperand = DAG.getFreeze(MaybePoisonOperand);
@@ -16182,9 +16182,10 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
   // Finally, recreate the node; its operands were updated to use
   // frozen operands, so we just need to use its "original" operands.
   SmallVector<SDValue> Ops(N0->ops());
-  // Special-handle ISD::UNDEF, each single one of them can be it's own thing.
+  // Special-handle ISD::UNDEF, ISD::POISON, each single one of them can be its
+  // own thing.
   for (SDValue &Op : Ops) {
-    if (Op.getOpcode() == ISD::UNDEF)
+    if (Op.isUndef())
       Op = DAG.getFreeze(Op);
   }
 
@@ -24320,7 +24321,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
     if (ISD::BITCAST == Op.getOpcode() &&
         !Op.getOperand(0).getValueType().isVector())
       Ops.push_back(Op.getOperand(0));
-    else if (ISD::UNDEF == Op.getOpcode())
+    else if (Op.isUndef())
       Ops.push_back(DAG.getNode(ISD::UNDEF, DL, SVT));
     else
       return SDValue();
@@ -24715,7 +24716,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
   // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
   // -> (BUILD_VECTOR A, B, ..., C, D, ...)
   auto IsBuildVectorOrUndef = [](const SDValue &Op) {
-    return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
+    return Op.isUndef() || ISD::BUILD_VECTOR == Op.getOpcode();
   };
   if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
     SmallVector<SDValue, 8> Opnds;
@@ -24739,7 +24740,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
       EVT OpVT = Op.getValueType();
       unsigned NumElts = OpVT.getVectorNumElements();
 
-      if (ISD::UNDEF == Op.getOpcode())
+      if (Op.isUndef())
         Opnds.append(NumElts, DAG.getUNDEF(MinVT));
 
       if (ISD::BUILD_VECTOR == Op.getOpcode()) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index c6475f02199033d..7a84d695ccc5a57 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -977,6 +977,22 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
   TargetLowering::LegalizeAction Action = TargetLowering::Legal;
   bool SimpleFinishLegalizing = true;
   switch (Node->getOpcode()) {
+  // FIXME: If the node represents a poison value, replace it with an undef
+  // value.
+  //  A poison value results from an erroneous operation but does not cause
+  //  immediate undefined behavior, allowing speculative execution.
+  //  Since most operations propagate poison, it is valid to replace poison
+  //  with an undef value, which can take any legal value of the same type.
+  //  This ensures that downstream computations do not rely on poison
+  //  semantics.
+  //  Poison is more restrictive than undef. Since we replace poison with
+  //  undef here, the poison information is lost once this code has run. If,
+  //  in the future, we need to retain poison information past this point,
+  //  this code will need to be updated accordingly.
+  case ISD::POISON: {
+    SDValue UndefNode = DAG.getUNDEF(Node->getValueType(0));
+    ReplaceNode(Node, UndefNode.getNode());
+    break;
+  }
   case ISD::INTRINSIC_W_CHAIN:
   case ISD::INTRINSIC_WO_CHAIN:
   case ISD::INTRINSIC_VOID:
@@ -3136,6 +3152,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
     for (unsigned i = 0; i < Node->getNumValues(); i++)
       Results.push_back(Node->getOperand(i));
     break;
+  case ISD::POISON:
   case ISD::UNDEF: {
     EVT VT = Node->getValueType(0);
     if (VT.isInteger())
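
The soundness direction in the FIXME above can be shown with existing
SelectionDAG calls (an illustrative sketch, not code from the patch):

    // Poison may be refined to any value of its type, so rewriting a
    // POISON node to UNDEF is a legal refinement.
    SDValue P = DAG.getPoison(MVT::i32);
    DAG.ReplaceAllUsesWith(P, DAG.getUNDEF(MVT::i32)); // poison -> undef: ok
    // The reverse (undef -> poison) would be unsound: poison additionally
    // propagates through operations and is immediate UB in some uses, e.g.
    // as a branch condition.
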
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 71f100bfa034343..1c28722f3f0a284 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -164,6 +164,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
     case ISD::STRICT_UINT_TO_FP:
     case ISD::SINT_TO_FP:
     case ISD::UINT_TO_FP:  R = SoftenFloatRes_XINT_TO_FP(N); break;
+    case ISD::POISON:
     case ISD::UNDEF:       R = SoftenFloatRes_UNDEF(N); break;
     case ISD::VAARG:       R = SoftenFloatRes_VAARG(N); break;
     case ISD::VECREDUCE_FADD:
@@ -1474,6 +1475,7 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
     report_fatal_error("Do not know how to expand the result of this "
                        "operator!");
     // clang-format off
+  case ISD::POISON:
   case ISD::UNDEF:        SplitRes_UNDEF(N, Lo, Hi); break;
   case ISD::SELECT:       SplitRes_Select(N, Lo, Hi); break;
   case ISD::SELECT_CC:    SplitRes_SELECT_CC(N, Lo, Hi); break;
@@ -2783,6 +2785,7 @@ void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) {
 
     case ISD::SINT_TO_FP:
     case ISD::UINT_TO_FP: R = PromoteFloatRes_XINT_TO_FP(N); break;
+    case ISD::POISON:
     case ISD::UNDEF:      R = PromoteFloatRes_UNDEF(N); break;
     case ISD::ATOMIC_SWAP: R = BitcastToInt_ATOMIC_SWAP(N); break;
     case ISD::VECREDUCE_FADD:
@@ -3242,6 +3245,7 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
   case ISD::STRICT_UINT_TO_FP:
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:  R = SoftPromoteHalfRes_XINT_TO_FP(N); break;
+  case ISD::POISON:
   case ISD::UNDEF:       R = SoftPromoteHalfRes_UNDEF(N); break;
   case ISD::ATOMIC_SWAP: R = BitcastToInt_ATOMIC_SWAP(N); break;
   case ISD::VECREDUCE_FADD:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index be7521f34168503..4da7bf9a680abe1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -118,6 +118,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::VP_SRL:      Res = PromoteIntRes_SRL(N); break;
   case ISD::VP_TRUNCATE:
   case ISD::TRUNCATE:    Res = PromoteIntRes_TRUNCATE(N); break;
+  case ISD::POISON:
   case ISD::UNDEF:       Res = PromoteIntRes_UNDEF(N); break;
   case ISD::VAARG:       Res = PromoteIntRes_VAARG(N); break;
   case ISD::VSCALE:      Res = PromoteIntRes_VSCALE(N); break;
@@ -2840,6 +2841,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, ResNo, Lo, Hi); break;
   case ISD::SELECT:       SplitRes_Select(N, Lo, Hi); break;
   case ISD::SELECT_CC:    SplitRes_SELECT_CC(N, Lo, Hi); break;
+  case ISD::POISON:
   case ISD::UNDEF:        SplitRes_UNDEF(N, Lo, Hi); break;
   case ISD::FREEZE:       SplitRes_FREEZE(N, Lo, Hi); break;
   case ISD::SETCC:        ExpandIntRes_SETCC(N, Lo, Hi); break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 5117eb8d91dfb21..37aad1e21f7bd2b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -71,6 +71,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::SELECT:            R = ScalarizeVecRes_SELECT(N); break;
   case ISD::SELECT_CC:         R = ScalarizeVecRes_SELECT_CC(N); break;
   case ISD::SETCC:             R = ScalarizeVecRes_SETCC(N); break;
+  case ISD::POISON:
   case ISD::UNDEF:             R = ScalarizeVecRes_UNDEF(N); break;
   case ISD::VECTOR_SHUFFLE:    R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
   case ISD::IS_FPCLASS:        R = ScalarizeVecRes_IS_FPCLASS(N); break;
@@ -1117,6 +1118,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::VP_MERGE:
   case ISD::VP_SELECT:    SplitRes_Select(N, Lo, Hi); break;
   case ISD::SELECT_CC:    SplitRes_SELECT_CC(N, Lo, Hi); break;
+  case ISD::POISON:
   case ISD::UNDEF:        SplitRes_UNDEF(N, Lo, Hi); break;
   case ISD::BITCAST:           SplitVecRes_BITCAST(N, Lo, Hi); break;
   case ISD::BUILD_VECTOR:      SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break;
@@ -4523,6 +4525,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::SELECT_CC:         Res = WidenVecRes_SELECT_CC(N); break;
   case ISD::VP_SETCC:
   case ISD::SETCC:             Res = WidenVecRes_SETCC(N); break;
+  case ISD::POISON:
   case ISD::UNDEF:             Res = WidenVecRes_UNDEF(N); break;
   case ISD::VECTOR_SHUFFLE:
     Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 0dfd0302ae5438c..6d62760d25dfeda 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5458,6 +5458,9 @@ bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
   case ISD::CopyFromReg:
     return true;
 
+  case ISD::POISON:
+    return false;
+
   case ISD::UNDEF:
     return PoisonOnly;
 
@@ -6307,9 +6310,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
         Flags.setNonNeg(N1->getFlags().hasNonNeg());
       return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
     }
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       // sext(undef) = 0, because the top bits will all be the same.
       return getConstant(0, DL, VT);
+
     break;
   case ISD::ZERO_EXTEND:
     assert(VT.isInteger() && N1.getValueType().isInteger() &&
@@ -6327,7 +6331,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       Flags.setNonNeg(N1->getFlags().hasNonNeg());
       return getNode(ISD::ZERO_EXTEND, DL, VT, N1.getOperand(0), Flags);
     }
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       // zext(undef) = 0, because the top bits will be zero.
       return getConstant(0, DL, VT);
 
@@ -6369,7 +6373,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
       return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
     }
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       return getUNDEF(VT);
 
     // (ext (trunc x)) -> x
@@ -6404,7 +6408,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
         return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
       return N1.getOperand(0);
     }
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       return getUNDEF(VT);
     if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes)
       return getVScale(DL, VT,
@@ -6422,14 +6426,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::ABS:
     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid ABS!");
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       return getConstant(0, DL, VT);
     break;
   case ISD::BSWAP:
     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BSWAP!");
     assert((VT.getScalarSizeInBits() % 16 == 0) &&
            "BSWAP types must be a multiple of 16 bits!");
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       return getUNDEF(VT);
     // bswap(bswap(X)) -> X.
     if (OpOpcode == ISD::BSWAP)
@@ -6437,7 +6441,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::BITREVERSE:
     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BITREVERSE!");
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       return getUNDEF(VT);
     break;
   case ISD::BITCAST:
@@ -6446,7 +6450,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     if (VT == N1.getValueType()) return N1;   // noop conversion.
     if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
       return getNode(ISD::BITCAST, DL, VT, N1.getOperand(0));
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       return getUNDEF(VT);
     break;
   case ISD::SCALAR_TO_VECTOR:
@@ -6456,7 +6460,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
              N1.getValueType().isInteger() &&
              VT.getVectorElementType().bitsLE(N1.getValueType()))) &&
            "Illegal SCALAR_TO_VECTOR node!");
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       return getUNDEF(VT);
     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
@@ -6467,7 +6471,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::FNEG:
     // Negation of an unknown bag of bits is still completely undefined.
-    if (OpOpcode == ISD::UNDEF)
+    if (N1.isUndef())
       return getUNDEF(VT);
 
     if (OpOpcode == ISD::FNEG) // --X -> X
@@ -9237,6 +9241,11 @@ SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
 
   SDVTList VTs = Indexed ?
     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
+
+  // Lower poison to undef.
+  if (Ptr.getNode()->isPoison())
+    Ptr = getUNDEF(Ptr.getValueType());
+
   SDValue Ops[] = { Chain, Ptr, Offset };
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
@@ -13374,7 +13383,7 @@ void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
 bool BuildVectorSDNode::isConstant() const {
   for (const SDValue &Op : op_values()) {
     unsigned Opc = Op.getOpcode();
-    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
+    if (!Op.isUndef() && Opc != ISD::Constant && Opc != ISD::ConstantFP)
       return false;
   }
   return true;
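
With the new case in isGuaranteedNotToBeUndefOrPoison, the query now
distinguishes the two kinds of value (assumed illustration):

    SDValue U = DAG.getUNDEF(MVT::i32);
    SDValue P = DAG.getPoison(MVT::i32);
    DAG.isGuaranteedNotToBeUndefOrPoison(U, /*PoisonOnly=*/true);  // true
    DAG.isGuaranteedNotToBeUndefOrPoison(U, /*PoisonOnly=*/false); // false
    DAG.isGuaranteedNotToBeUndefOrPoison(P, /*PoisonOnly=*/true);  // false
    DAG.isGuaranteedNotToBeUndefOrPoison(P, /*PoisonOnly=*/false); // false
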
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index f8d7c3ef7bbe71a..fe4e3c00d4260ee 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1819,7 +1819,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
 
     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
-      return DAG.getUNDEF(VT);
+      return isa<PoisonValue>(C) ? DAG.getPoison(VT) : DAG.getUNDEF(VT);
 
     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
       visit(CE->getOpcode(), *CE);
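
Background for the builder change (a property of the IR constant class
hierarchy, not something this patch introduces): PoisonValue derives from
UndefValue, so the isa<UndefValue>(C) guard already matches poison
constants and the ternary only needs to tell the two apart. A sketch,
where Ty is some non-aggregate type:

    const Constant *P = PoisonValue::get(Ty);
    isa<UndefValue>(P);   // true: poison is-a undef in the class hierarchy
    isa<PoisonValue>(P);  // true
    const Constant *U = UndefValue::get(Ty);
    isa<PoisonValue>(U);  // false: plain undef is not poison
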
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 580ff19065557ba..4434b1203451f9e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -188,6 +188,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
   case ISD::CopyToReg:                  return "CopyToReg";
   case ISD::CopyFromReg:                return "CopyFromReg";
   case ISD::UNDEF:                      return "undef";
+  case ISD::POISON:                     return "poison";
   case ISD::VSCALE:                     return "vscale";
   case ISD::MERGE_VALUES:               return "merge_values";
   case ISD::INLINEASM:                  return "inlineasm";
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index d64a90bcaae7dac..48fd1e1e4591952 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -3275,6 +3275,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
   case ISD::WRITE_REGISTER:
     Select_WRITE_REGISTER(NodeToMatch);
     return;
+  case ISD::POISON:
   case ISD::UNDEF:
     Select_UNDEF(NodeToMatch);
     return;

From a3963e2af89ae6051c3d3ff86792fbd4ac5e67e8 Mon Sep 17 00:00:00 2001
From: zhijian <zhijian at ca.ibm.com>
Date: Fri, 7 Feb 2025 15:04:49 +0000
Subject: [PATCH 2/2] add a new helper function isUndefOrPoison

---
 llvm/include/llvm/CodeGen/SelectionDAG.h      |   4 +-
 llvm/include/llvm/CodeGen/SelectionDAGNodes.h |  18 +-
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 322 +++++++-------
 llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp |  15 +-
 .../SelectionDAG/LegalizeFloatTypes.cpp       |   4 -
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  45 +-
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 209 ++++-----
 .../SelectionDAG/StatepointLowering.cpp       |   8 +-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  30 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    | 101 +++--
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |  68 +--
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   |  63 +--
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 106 ++---
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 411 ++++++++++--------
 14 files changed, 742 insertions(+), 662 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 04d72eca9a35be2..c7e41aec7a0b8ee 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -871,7 +871,7 @@ class SelectionDAG {
   /// for integers, a type wider than) VT's element type.
   SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
     // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       assert((VT.getVectorElementType() == Op.getValueType() ||
               (VT.isInteger() &&
                VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
@@ -887,7 +887,7 @@ class SelectionDAG {
   // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
   // elements.
   SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       assert((VT.getVectorElementType() == Op.getValueType() ||
               (VT.isInteger() &&
                VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index c98c9da43a30804..939caa9af3bf2c0 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -213,6 +213,7 @@ class SDValue {
   inline bool isTargetOpcode() const;
   inline bool isMachineOpcode() const;
   inline bool isUndef() const;
+  inline bool isUndefOrPoison() const;
   inline unsigned getMachineOpcode() const;
   inline const DebugLoc &getDebugLoc() const;
   inline void dump() const;
@@ -690,18 +691,17 @@ END_TWO_BYTE_PACK()
   /// \<target\>ISD namespace).
   bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
 
-  /// Returns true if the node is UNDEF; when UndefOnly is false, POISON is
-  /// also treated as undef.
-  /// @param UndefOnly When true, match only UNDEF; when false, match both
-  /// UNDEF and POISON.
-  bool isUndef(bool UndefOnly = false) const {
-    return NodeType == ISD::UNDEF || (!UndefOnly && NodeType == ISD::POISON);
-  }
+  /// Return true if the node is an UNDEF node.
+  bool isUndef() const { return NodeType == ISD::UNDEF; }
 
   /// Return true if the node is a POISON node.
   bool isPoison() const { return NodeType == ISD::POISON; }
 
+  /// Return true if the node is an UNDEF or POISON node.
+  bool isUndefOrPoison() const {
+    return NodeType == ISD::UNDEF || NodeType == ISD::POISON;
+  }
+
   /// Test if this node is a memory intrinsic (with valid pointer information).
   bool isMemIntrinsic() const { return SDNodeBits.IsMemIntrinsic; }
 
@@ -1259,6 +1259,8 @@ inline bool SDValue::isUndef() const {
   return Node->isUndef();
 }
 
+inline bool SDValue::isUndefOrPoison() const { return Node->isUndefOrPoison(); }
+
 inline bool SDValue::use_empty() const {
   return !Node->hasAnyUseOfValue(ResNo);
 }
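
After this revision the boolean parameter is gone and the family is three
explicit predicates; a short assumed usage sketch:

    SDValue U = DAG.getUNDEF(MVT::i1);
    SDValue P = DAG.getPoison(MVT::i1);
    U.isUndef();          // true       P.isUndef();          // now false
    U->isPoison();        // false      P->isPoison();        // true
    U.isUndefOrPoison();  // true       P.isUndefOrPoison();  // true
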
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index ce776214df1f1e4..0cd45054c806c1a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1053,7 +1053,7 @@ static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
     return false;
   unsigned BitWidth = N.getScalarValueSizeInBits();
   for (const SDValue &Op : N->op_values()) {
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op);
     if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth ||
@@ -2108,7 +2108,7 @@ SDValue DAGCombiner::visitFCANONICALIZE(SDNode *N) {
   SDLoc dl(N);
 
   // Canonicalize undef to quiet NaN.
-  if (Operand.isUndef()) {
+  if (Operand.isUndefOrPoison()) {
     APFloat CanonicalQNaN = APFloat::getQNaN(VT.getFltSemantics());
     return DAG.getConstantFP(CanonicalQNaN, dl, VT);
   }
@@ -2670,9 +2670,9 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
   SDLoc DL(N);
 
   // fold (add x, undef) -> undef
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return N0;
-  if (N1.isUndef())
+  if (N1.isUndefOrPoison())
     return N1;
 
   // fold (add c1, c2) -> c1+c2
@@ -3049,7 +3049,7 @@ SDValue DAGCombiner::visitADDSAT(SDNode *N) {
   SDLoc DL(N);
 
   // fold (add_sat x, undef) -> -1
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getAllOnesConstant(DL, VT);
 
   // fold (add_sat c1, c2) -> c3
@@ -4053,9 +4053,9 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
                        DAG.getNode(ISD::MUL, DL, VT, B, C));
 
   // If either operand of a sub is undef, the result is undef
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return N0;
-  if (N1.isUndef())
+  if (N1.isUndefOrPoison())
     return N1;
 
   if (SDValue V = foldAddSubBoolOfMaskedVal(N, DL, DAG))
@@ -4250,7 +4250,7 @@ SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
   SDLoc DL(N);
 
   // fold (sub_sat x, undef) -> 0
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // fold (sub_sat x, x) -> 0
@@ -4403,7 +4403,7 @@ SDValue DAGCombiner::visitMULFIX(SDNode *N) {
   EVT VT = N0.getValueType();
 
   // fold (mulfix x, undef, scale) -> 0
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getConstant(0, SDLoc(N), VT);
 
   // Canonicalize constant to RHS (vector doesn't have to splat)
@@ -4428,7 +4428,7 @@ template <class MatchContextClass> SDValue DAGCombiner::visitMUL(SDNode *N) {
   MatchContextClass Matcher(DAG, TLI, N);
 
   // fold (mul x, undef) -> 0
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // fold (mul c1, c2) -> c1*c2
@@ -4788,7 +4788,7 @@ static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) {
 
   // undef / X -> 0
   // undef % X -> 0
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // 0 / X -> 0
@@ -5207,7 +5207,7 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
         DAG.getShiftAmountConstant(N0.getScalarValueSizeInBits() - 1, VT, DL));
 
   // fold (mulhs x, undef) -> 0
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // If the type twice as wide is legal, transform the mulhs to a wider multiply
@@ -5264,7 +5264,7 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
     return DAG.getConstant(0, DL, VT);
 
   // fold (mulhu x, undef) -> 0
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // fold (mulhu x, (1 << c)) -> x >> (bitwidth - c)
@@ -5328,9 +5328,9 @@ SDValue DAGCombiner::visitAVG(SDNode *N) {
       return FoldedVOp;
 
   // fold (avg x, undef) -> x
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return N1;
-  if (N1.isUndef())
+  if (N1.isUndefOrPoison())
     return N0;
 
   // fold (avg x, x) --> x
@@ -5423,7 +5423,7 @@ SDValue DAGCombiner::visitABD(SDNode *N) {
       return FoldedVOp;
 
   // fold (abd x, undef) -> 0
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // fold (abd x, x) -> 0
@@ -5850,8 +5850,9 @@ SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
   // 2. The saturation pattern is broken by canonicalization in InstCombine.
   bool IsOpIllegal = !TLI.isOperationLegal(Opcode, VT);
   bool IsSatBroken = Opcode == ISD::UMIN && N0.getOpcode() == ISD::SMAX;
-  if ((IsSatBroken || IsOpIllegal) && (N0.isUndef() || DAG.SignBitIsZero(N0)) &&
-      (N1.isUndef() || DAG.SignBitIsZero(N1))) {
+  if ((IsSatBroken || IsOpIllegal) &&
+      (N0.isUndefOrPoison() || DAG.SignBitIsZero(N0)) &&
+      (N1.isUndefOrPoison() || DAG.SignBitIsZero(N1))) {
     unsigned AltOpcode;
     switch (Opcode) {
     case ISD::SMIN: AltOpcode = ISD::UMIN; break;
@@ -6053,7 +6054,7 @@ SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {
     // Don't try to fold this node if it requires introducing a
     // build vector of all zeros that might be illegal at this stage.
     SDValue ShOp = N0.getOperand(1);
-    if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
+    if (LogicOpcode == ISD::XOR && !ShOp.isUndefOrPoison())
       ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
 
     // (logic_op (shuf (A, C), shuf (B, C))) --> shuf (logic_op (A, B), C)
@@ -6066,7 +6067,7 @@ SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {
     // Don't try to fold this node if it requires introducing a
     // build vector of all zeros that might be illegal at this stage.
     ShOp = N0.getOperand(0);
-    if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
+    if (LogicOpcode == ISD::XOR && !ShOp.isUndefOrPoison())
       ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
 
     // (logic_op (shuf (C, A), shuf (C, B))) --> shuf (C, logic_op (A, B))
@@ -6512,7 +6513,7 @@ SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {
   SDLoc DL(N);
 
   // fold (and x, undef) -> 0
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL))
@@ -7861,7 +7862,7 @@ SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, const SDLoc &DL) {
   EVT VT = N1.getValueType();
 
   // fold (or x, undef) -> -1
-  if (!LegalOperations && (N0.isUndef() || N1.isUndef()))
+  if (!LegalOperations && (N0.isUndefOrPoison() || N1.isUndefOrPoison()))
     return DAG.getAllOnesConstant(DL, VT);
 
   if (SDValue V = foldLogicOfSetCCs(false, N0, N1, DL))
@@ -9561,13 +9562,13 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
   SDLoc DL(N);
 
   // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
-  if (N0.isUndef() && N1.isUndef())
+  if (N0.isUndefOrPoison() && N1.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // fold (xor x, undef) -> undef
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return N0;
-  if (N1.isUndef())
+  if (N1.isUndefOrPoison())
     return N1;
 
   // fold (xor c1, c2) -> c1^c2
@@ -10962,7 +10963,7 @@ SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
       return IsFSHL ? N0 : N1;
 
   auto IsUndefOrZero = [](SDValue V) {
-    return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
+    return V.isUndefOrPoison() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
   };
 
   // TODO - support non-uniform vector shift amounts.
@@ -12086,7 +12087,7 @@ static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
   // length of the BV and see if all the non-undef nodes are the same.
   ConstantSDNode *BottomHalf = nullptr;
   for (int i = 0; i < NumElems / 2; ++i) {
-    if (Cond->getOperand(i)->isUndef())
+    if (Cond->getOperand(i)->isUndefOrPoison())
       continue;
 
     if (BottomHalf == nullptr)
@@ -12098,7 +12099,7 @@ static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
   // Do the same for the second half of the BuildVector
   ConstantSDNode *TopHalf = nullptr;
   for (int i = NumElems / 2; i < NumElems; ++i) {
-    if (Cond->getOperand(i)->isUndef())
+    if (Cond->getOperand(i)->isUndefOrPoison())
       continue;
 
     if (TopHalf == nullptr)
@@ -12262,7 +12263,7 @@ SDValue DAGCombiner::visitMSTORE(SDNode *N) {
   if (MaskedStoreSDNode *MST1 = dyn_cast<MaskedStoreSDNode>(Chain)) {
     if (MST->isUnindexed() && MST->isSimple() && MST1->isUnindexed() &&
         MST1->isSimple() && MST1->getBasePtr() == Ptr &&
-        !MST->getBasePtr().isUndef() &&
+        !MST->getBasePtr().isUndefOrPoison() &&
         ((Mask == MST1->getMask() && MST->getMemoryVT().getStoreSize() ==
                                          MST1->getMemoryVT().getStoreSize()) ||
          ISD::isConstantSplatVectorAllOnes(Mask.getNode())) &&
@@ -12350,13 +12351,13 @@ SDValue DAGCombiner::visitVECTOR_COMPRESS(SDNode *N) {
   SDValue Passthru = N->getOperand(2);
   EVT VecVT = Vec.getValueType();
 
-  bool HasPassthru = !Passthru.isUndef();
+  bool HasPassthru = !Passthru.isUndefOrPoison();
 
   APInt SplatVal;
   if (ISD::isConstantSplatVector(Mask.getNode(), SplatVal))
     return TLI.isConstTrueVal(Mask) ? Vec : Passthru;
 
-  if (Vec.isUndef() || Mask.isUndef())
+  if (Vec.isUndefOrPoison() || Mask.isUndefOrPoison())
     return Passthru;
 
   // No need for potentially expensive compress if the mask is constant.
@@ -12368,7 +12369,7 @@ SDValue DAGCombiner::visitVECTOR_COMPRESS(SDNode *N) {
     for (unsigned I = 0; I < NumElmts; ++I) {
       SDValue MaskI = Mask.getOperand(I);
       // We treat undef mask entries as "false".
-      if (MaskI.isUndef())
+      if (MaskI.isUndefOrPoison())
         continue;
 
       if (TLI.isConstTrueVal(MaskI)) {
@@ -12546,7 +12547,7 @@ SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
   for (unsigned i = 0; i != Elts; ++i) {
     SDValue N1Elt = N1.getOperand(i);
     SDValue N2Elt = N2.getOperand(i);
-    if (N1Elt.isUndef() || N2Elt.isUndef())
+    if (N1Elt.isUndefOrPoison() || N2Elt.isUndefOrPoison())
       continue;
     if (N1Elt.getValueType() != N2Elt.getValueType()) {
       AllAddOne = false;
@@ -12908,7 +12909,7 @@ SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
 
     // When the condition is UNDEF, just return the first operand. This is
     // consistent with DAG creation: no setcc node is created in this case.
-    if (SCC->isUndef())
+    if (SCC->isUndefOrPoison())
       return N2;
 
     // Fold to a simpler select_cc
@@ -13215,7 +13216,7 @@ static SDValue tryToFoldExtendOfConstant(SDNode *N, const SDLoc &DL,
 
   for (unsigned i = 0; i != NumElts; ++i) {
     SDValue Op = N0.getOperand(i);
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       if (Opcode == ISD::ANY_EXTEND || Opcode == ISD::ANY_EXTEND_VECTOR_INREG)
         Elts.push_back(DAG.getUNDEF(SVT));
       else
@@ -13861,7 +13862,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
       return FoldedVOp;
 
   // sext(undef) = 0 because the top bits will all be the same.
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   if (SDValue Res = tryToFoldExtendOfConstant(N, DL, TLI, DAG, LegalTypes))
@@ -14136,7 +14137,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
       return FoldedVOp;
 
   // zext(undef) = 0
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   if (SDValue Res = tryToFoldExtendOfConstant(N, DL, TLI, DAG, LegalTypes))
@@ -14461,7 +14462,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
   SDLoc DL(N);
 
   // aext(undef) = undef
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getUNDEF(VT);
 
   if (SDValue Res = tryToFoldExtendOfConstant(N, DL, TLI, DAG, LegalTypes))
@@ -14967,7 +14968,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
   SDLoc DL(N);
 
   // sext_vector_inreg(undef) = 0 because the top bits will all be the same.
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // fold (sext_in_reg c1) -> c1
@@ -15187,7 +15188,7 @@ SDValue DAGCombiner::visitEXTEND_VECTOR_INREG(SDNode *N) {
   EVT VT = N->getValueType(0);
   SDLoc DL(N);
 
-  if (N0.isUndef()) {
+  if (N0.isUndefOrPoison()) {
     // aext_vector_inreg(undef) = undef because the top bits are undefined.
     // {s/z}ext_vector_inreg(undef) = 0 because the top bits must be the same.
     return N->getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG
@@ -15333,7 +15334,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
   SDLoc DL(N);
 
   // trunc(undef) = undef
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getUNDEF(VT);
 
   // fold (truncate (truncate x)) -> (truncate x)
@@ -15553,7 +15554,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
 
     for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
       SDValue X = N0.getOperand(i);
-      if (!X.isUndef()) {
+      if (!X.isUndefOrPoison()) {
         V = X;
         Idx = i;
         NumDefs++;
@@ -15799,7 +15800,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getUNDEF(VT);
 
   // If the input is a BUILD_VECTOR with all constant elements, fold this now.
@@ -16036,7 +16037,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
       if (Op.getOpcode() == ISD::BITCAST &&
           Op.getOperand(0).getValueType() == VT)
         return SDValue(Op.getOperand(0));
-      if (Op.isUndef() || isAnyConstantBuildVector(Op))
+      if (Op.isUndefOrPoison() || isAnyConstantBuildVector(Op))
         return DAG.getBitcast(VT, Op);
       return SDValue();
     };
@@ -16116,8 +16117,9 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
     if (llvm::ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
       SmallVector<SDValue, 8> NewVecC;
       for (const SDValue &Op : N0->op_values())
-        NewVecC.push_back(
-            Op.isUndef() ? DAG.getConstant(0, DL, Op.getValueType()) : Op);
+        NewVecC.push_back(Op.isUndefOrPoison()
+                              ? DAG.getConstant(0, DL, Op.getValueType())
+                              : Op);
       return DAG.getBuildVector(VT, DL, NewVecC);
     }
   }
@@ -16156,7 +16158,7 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
     // also recursively replace t184 by t150.
     SDValue MaybePoisonOperand = N->getOperand(0).getOperand(OpNo);
     // Don't replace every single UNDEF everywhere with frozen UNDEF, though.
-    if (MaybePoisonOperand.isUndef())
+    if (MaybePoisonOperand.isUndefOrPoison())
       continue;
     // First, freeze each offending operand.
     SDValue FrozenMaybePoisonOperand = DAG.getFreeze(MaybePoisonOperand);
@@ -16185,7 +16187,7 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
   // Special-handle ISD::UNDEF, ISD::POISON, each single one of them can be its
   // own thing.
   for (SDValue &Op : Ops) {
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       Op = DAG.getFreeze(Op);
   }
 
@@ -18148,7 +18150,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
   SDLoc DL(N);
 
   // [us]itofp(undef) = 0, because the result value is bounded.
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getConstantFP(0.0, DL, VT);
 
   // fold (sint_to_fp c1) -> c1fp
@@ -18196,7 +18198,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
   SDLoc DL(N);
 
   // [us]itofp(undef) = 0, because the result value is bounded.
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getConstantFP(0.0, DL, VT);
 
   // fold (uint_to_fp c1) -> c1fp
@@ -18274,7 +18276,7 @@ SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
   SDLoc DL(N);
 
   // fold (fp_to_sint undef) -> undef
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getUNDEF(VT);
 
   // fold (fp_to_sint c1fp) -> c1
@@ -18290,7 +18292,7 @@ SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
   SDLoc DL(N);
 
   // fold (fp_to_uint undef) -> undef
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getUNDEF(VT);
 
   // fold (fp_to_uint c1fp) -> c1
@@ -18306,7 +18308,7 @@ SDValue DAGCombiner::visitXROUND(SDNode *N) {
 
   // fold (lrint|llrint undef) -> undef
   // fold (lround|llround undef) -> undef
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getUNDEF(VT);
 
   // fold (lrint|llrint c1fp) -> c1
@@ -19448,7 +19450,7 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
   }
 
   // TODO: Deal with nonzero offset.
-  if (LD->getBasePtr().isUndef() || Offset != 0)
+  if (LD->getBasePtr().isUndefOrPoison() || Offset != 0)
     return SDValue();
   // Model necessary truncations / extensions.
   // Truncate Value To Stored Memory Size.
@@ -19763,7 +19765,7 @@ struct LoadedSlice {
       return false;
 
     // Offsets are for indexed load only, we do not handle that.
-    if (!Origin->getOffset().isUndef())
+    if (!Origin->getOffset().isUndefOrPoison())
       return false;
 
     const TargetLowering &TLI = DAG->getTargetLoweringInfo();
@@ -20829,7 +20831,7 @@ DAGCombiner::getStoreMergeCandidates(StoreSDNode *St,
   // pointer. We must have a base and an offset. Do not handle stores to undef
   // base pointers.
   BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
-  if (!BasePtr.getBase().getNode() || BasePtr.getBase().isUndef())
+  if (!BasePtr.getBase().getNode() || BasePtr.getBase().isUndefOrPoison())
     return nullptr;
 
   SDValue Val = peekThroughBitcasts(St->getValue());
@@ -21890,7 +21892,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
   }
 
   // Turn 'store undef, Ptr' -> nothing.
-  if (Value.isUndef() && ST->isUnindexed() && !ST->isVolatile())
+  if (Value.isUndefOrPoison() && ST->isUnindexed() && !ST->isVolatile())
     return Chain;
 
   // Try to infer better alignment information than the store already has.
@@ -22020,7 +22022,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
       }
 
       if (OptLevel != CodeGenOptLevel::None && ST1->hasOneUse() &&
-          !ST1->getBasePtr().isUndef() &&
+          !ST1->getBasePtr().isUndefOrPoison() &&
           ST->getAddressSpace() == ST1->getAddressSpace()) {
         // If we consider two stores and one smaller in size is a scalable
         // vector type and another one a bigger size store with a fixed type,
@@ -22315,7 +22317,7 @@ static bool mergeEltWithShuffle(SDValue &X, SDValue &Y, ArrayRef<int> Mask,
   // If we failed to find a match, see if we can replace an UNDEF shuffle
   // operand.
   if (ElementOffset == -1) {
-    if (!Y.isUndef() || InsertVal0.getValueType() != Y.getValueType())
+    if (!Y.isUndefOrPoison() || InsertVal0.getValueType() != Y.getValueType())
       return false;
     ElementOffset = Mask.size();
     Y = InsertVal0;
@@ -22535,7 +22537,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
   if (!IndexC) {
     // If this is variable insert to undef vector, it might be better to splat:
     // inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
-    if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT))
+    if (InVec.isUndefOrPoison() && TLI.shouldSplatInsEltVarIndex(VT))
       return DAG.getSplat(VT, DL, InVal);
     return SDValue();
   }
@@ -22624,7 +22626,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
     // Recurse up a INSERT_VECTOR_ELT chain to build a BUILD_VECTOR.
     for (SDValue CurVec = InVec; CurVec;) {
       // UNDEF - build new BUILD_VECTOR from already inserted operands.
-      if (CurVec.isUndef())
+      if (CurVec.isUndefOrPoison())
         return CanonicalizeBuildVector(Ops);
 
       // BUILD_VECTOR - insert unused operands and build new BUILD_VECTOR.
@@ -23010,7 +23012,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
   SDValue Index = N->getOperand(1);
   EVT ScalarVT = N->getValueType(0);
   EVT VecVT = VecOp.getValueType();
-  if (VecOp.isUndef())
+  if (VecOp.isUndefOrPoison())
     return DAG.getUNDEF(ScalarVT);
 
   // extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val
@@ -23373,7 +23375,8 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
   for (unsigned i = 0; i != NumInScalars; ++i) {
     SDValue In = N->getOperand(i);
     // Ignore undef inputs.
-    if (In.isUndef()) continue;
+    if (In.isUndefOrPoison())
+      continue;
 
     bool AnyExt  = In.getOpcode() == ISD::ANY_EXTEND;
     bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
@@ -23433,10 +23436,10 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
     SDValue Cast = N->getOperand(i);
     assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
-            Cast.getOpcode() == ISD::ZERO_EXTEND ||
-            Cast.isUndef()) && "Invalid cast opcode");
+            Cast.getOpcode() == ISD::ZERO_EXTEND || Cast.isUndefOrPoison()) &&
+           "Invalid cast opcode");
     SDValue In;
-    if (Cast.isUndef())
+    if (Cast.isUndefOrPoison())
       In = DAG.getUNDEF(SourceType);
     else
       In = Cast->getOperand(0);
@@ -23506,7 +23509,8 @@ SDValue DAGCombiner::reduceBuildVecTruncToBitCast(SDNode *N) {
   for (unsigned i = 0; i != NumInScalars; ++i) {
     SDValue In = PeekThroughBitcast(N->getOperand(i));
     // Ignore undef inputs.
-    if (In.isUndef()) continue;
+    if (In.isUndefOrPoison())
+      continue;
 
     if (In.getOpcode() != ISD::TRUNCATE)
       return SDValue();
@@ -23701,7 +23705,7 @@ static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG) {
   int ZextElt = -1;
   for (int i = 0; i != NumBVOps; ++i) {
     SDValue Op = BV->getOperand(i);
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
     if (ZextElt == -1)
       ZextElt = i;
@@ -23811,7 +23815,7 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
   for (unsigned i = 0; i != NumElems; ++i) {
     SDValue Op = N->getOperand(i);
 
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
 
     // See if we can use a blend with a zero vector.
@@ -24001,7 +24005,7 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
       SDValue L = Shuffles[Left];
       ArrayRef<int> LMask;
       bool IsLeftShuffle = L.getOpcode() == ISD::VECTOR_SHUFFLE &&
-                           L.use_empty() && L.getOperand(1).isUndef() &&
+                           L.use_empty() && L.getOperand(1).isUndefOrPoison() &&
                            L.getOperand(0).getValueType() == L.getValueType();
       if (IsLeftShuffle) {
         LMask = cast<ShuffleVectorSDNode>(L.getNode())->getMask();
@@ -24010,7 +24014,8 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
       SDValue R = Shuffles[Right];
       ArrayRef<int> RMask;
       bool IsRightShuffle = R.getOpcode() == ISD::VECTOR_SHUFFLE &&
-                            R.use_empty() && R.getOperand(1).isUndef() &&
+                            R.use_empty() &&
+                            R.getOperand(1).isUndefOrPoison() &&
                             R.getOperand(0).getValueType() == R.getValueType();
       if (IsRightShuffle) {
         RMask = cast<ShuffleVectorSDNode>(R.getNode())->getMask();
@@ -24193,7 +24198,8 @@ SDValue DAGCombiner::convertBuildVecZextToBuildVecWithZeros(SDNode *N) {
   NewOps.reserve(NewIntVT.getVectorNumElements());
   for (auto I : enumerate(N->ops())) {
     SDValue Op = I.value();
-    assert(!Op.isUndef() && "FIXME: after allowing UNDEF's, handle them here.");
+    assert(!Op.isUndefOrPoison() &&
+           "FIXME: after allowing UNDEFs, handle them here.");
     unsigned SrcOpIdx = I.index();
     if (KnownZeroOps[SrcOpIdx]) {
       NewOps.append(*Factor, ZeroOp);
@@ -24294,7 +24300,8 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
   // Do this late as some of the above may replace the splat.
   if (TLI.getOperationAction(ISD::SPLAT_VECTOR, VT) != TargetLowering::Expand)
     if (SDValue V = cast<BuildVectorSDNode>(N)->getSplatValue()) {
-      assert(!V.isUndef() && "Splat of undef should have been handled earlier");
+      assert(!V.isUndefOrPoison() &&
+             "Splat of undef should have been handled earlier");
       return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V);
     }
 
@@ -24321,7 +24328,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
     if (ISD::BITCAST == Op.getOpcode() &&
         !Op.getOperand(0).getValueType().isVector())
       Ops.push_back(Op.getOperand(0));
-    else if (Op.isUndef())
+    else if (Op.isUndefOrPoison())
       Ops.push_back(DAG.getNode(ISD::UNDEF, DL, SVT));
     else
       return SDValue();
@@ -24343,7 +24350,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
     for (SDValue &Op : Ops) {
       if (Op.getValueType() == SVT)
         continue;
-      if (Op.isUndef())
+      if (Op.isUndefOrPoison())
         Op = DAG.getNode(ISD::UNDEF, DL, SVT);
       else
         Op = DAG.getBitcast(SVT, Op);
@@ -24366,7 +24373,7 @@ static SDValue combineConcatVectorOfConcatVectors(SDNode *N,
   EVT SubVT;
   SDValue FirstConcat;
   for (const SDValue &Op : N->ops()) {
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
     if (Op.getOpcode() != ISD::CONCAT_VECTORS)
       return SDValue();
@@ -24384,7 +24391,7 @@ static SDValue combineConcatVectorOfConcatVectors(SDNode *N,
 
   SmallVector<SDValue> ConcatOps;
   for (const SDValue &Op : N->ops()) {
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       ConcatOps.append(FirstConcat->getNumOperands(), DAG.getUNDEF(SubVT));
       continue;
     }
@@ -24415,7 +24422,7 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
     Op = peekThroughBitcasts(Op);
 
     // UNDEF nodes convert to UNDEF shuffle mask values.
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       Mask.append((unsigned)NumOpElts, -1);
       continue;
     }
@@ -24433,7 +24440,7 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
     ExtVec = peekThroughBitcasts(ExtVec);
 
     // UNDEF nodes convert to UNDEF shuffle mask values.
-    if (ExtVec.isUndef()) {
+    if (ExtVec.isUndefOrPoison()) {
       Mask.append((unsigned)NumOpElts, -1);
       continue;
     }
@@ -24453,11 +24460,11 @@ static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
       return SDValue();
 
     // At most we can reference 2 inputs in the final shuffle.
-    if (SV0.isUndef() || SV0 == ExtVec) {
+    if (SV0.isUndefOrPoison() || SV0 == ExtVec) {
       SV0 = ExtVec;
       for (int i = 0; i != NumOpElts; ++i)
         Mask.push_back(i + ExtIdx);
-    } else if (SV1.isUndef() || SV1 == ExtVec) {
+    } else if (SV1.isUndefOrPoison() || SV1 == ExtVec) {
       SV1 = ExtVec;
       for (int i = 0; i != NumOpElts; ++i)
         Mask.push_back(i + ExtIdx + NumElts);
@@ -24563,10 +24570,10 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
   ShuffleVectorSDNode *SVN = nullptr;
   for (SDValue Op : N->ops()) {
     if (auto *CurSVN = dyn_cast<ShuffleVectorSDNode>(Op);
-        CurSVN && CurSVN->getOperand(1).isUndef() && N->isOnlyUserOf(CurSVN) &&
-        all_of(N->ops(), [CurSVN](SDValue Op) {
+        CurSVN && CurSVN->getOperand(1).isUndefOrPoison() &&
+        N->isOnlyUserOf(CurSVN) && all_of(N->ops(), [CurSVN](SDValue Op) {
           // FIXME: can we allow UNDEF operands?
-          return !Op.isUndef() &&
+          return !Op.isUndefOrPoison() &&
                  (Op.getNode() == CurSVN || is_contained(CurSVN->ops(), Op));
         })) {
       SVN = CurSVN;
@@ -24580,7 +24587,7 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
   // from the second operand, must be adjusted.
   SmallVector<int, 16> AdjustedMask;
   AdjustedMask.reserve(SVN->getMask().size());
-  assert(SVN->getOperand(1).isUndef() && "Expected unary shuffle!");
+  assert(SVN->getOperand(1).isUndefOrPoison() && "Expected unary shuffle!");
   append_range(AdjustedMask, SVN->getMask());
 
   // Identity masks for the operands of the (padded) shuffle.
@@ -24598,7 +24605,7 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
   SmallVector<int, 32> Mask;
   Mask.reserve(VT.getVectorNumElements());
   for (SDValue Op : N->ops()) {
-    assert(!Op.isUndef() && "Not expecting to concatenate UNDEF.");
+    assert(!Op.isUndefOrPoison() && "Not expecting to concatenate UNDEF.");
     if (Op.getNode() == SVN) {
       append_range(Mask, AdjustedMask);
       continue;
@@ -24624,7 +24631,7 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
   for (auto I : zip(SVN->ops(), ShufOps)) {
     SDValue ShufOp = std::get<0>(I);
     SDValue &NewShufOp = std::get<1>(I);
-    if (ShufOp.isUndef())
+    if (ShufOp.isUndefOrPoison())
       NewShufOp = DAG.getUNDEF(VT);
     else {
       SmallVector<SDValue, 2> ShufOpParts(N->getNumOperands(),
@@ -24649,7 +24656,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
 
   // Optimize concat_vectors where all but the first of the vectors are undef.
   if (all_of(drop_begin(N->ops()),
-             [](const SDValue &Op) { return Op.isUndef(); })) {
+             [](const SDValue &Op) { return Op.isUndefOrPoison(); })) {
     SDValue In = N->getOperand(0);
     assert(In.getValueType().isVector() && "Must concat vectors");
 
@@ -24716,7 +24723,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
   // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
   // -> (BUILD_VECTOR A, B, ..., C, D, ...)
   auto IsBuildVectorOrUndef = [](const SDValue &Op) {
-    return Op.isUndef() || ISD::BUILD_VECTOR == Op.getOpcode();
+    return Op.isUndefOrPoison() || ISD::BUILD_VECTOR == Op.getOpcode();
   };
   if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
     SmallVector<SDValue, 8> Opnds;
@@ -24740,7 +24747,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
       EVT OpVT = Op.getValueType();
       unsigned NumElts = OpVT.getVectorNumElements();
 
-      if (Op.isUndef())
+      if (Op.isUndefOrPoison())
         Opnds.append(NumElts, DAG.getUNDEF(MinVT));
 
       if (ISD::BUILD_VECTOR == Op.getOpcode()) {
@@ -24795,7 +24802,7 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
     SDValue Op = N->getOperand(i);
 
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
 
     // Check if this is the identity extract:
@@ -25154,7 +25161,7 @@ static SDValue foldExtractSubvectorFromShuffleVector(SDNode *N,
 
     SDValue Op = WideShuffleVector->getOperand(WideShufOpIdx);
 
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       // Picking from an undef operand. Let's adjust mask instead.
       NewMask.emplace_back(-1);
       continue;
@@ -25233,7 +25240,7 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
   SDLoc DL(N);
 
   // Extract from UNDEF is UNDEF.
-  if (V.isUndef())
+  if (V.isUndefOrPoison())
     return DAG.getUNDEF(NVT);
 
   if (TLI.isOperationLegalOrCustomOrPromote(ISD::LOAD, NVT))
@@ -25443,7 +25450,8 @@ static SDValue foldShuffleOfConcatUndefs(ShuffleVectorSDNode *Shuf,
   SDValue N0 = Shuf->getOperand(0), N1 = Shuf->getOperand(1);
   if (N0.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
       N1.getOpcode() != ISD::CONCAT_VECTORS || N1.getNumOperands() != 2 ||
-      !N0.getOperand(1).isUndef() || !N1.getOperand(1).isUndef())
+      !N0.getOperand(1).isUndefOrPoison() ||
+      !N1.getOperand(1).isUndefOrPoison())
     return SDValue();
 
   // Split the wide shuffle mask into halves. Any mask element that is accessing
@@ -25505,7 +25513,7 @@ static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
   // Special case: shuffle(concat(A,B)) can be more efficiently represented
   // as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
   // half vector elements.
-  if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() &&
+  if (NumElemsPerConcat * 2 == NumElts && N1.isUndefOrPoison() &&
       llvm::all_of(Mask.slice(NumElemsPerConcat, NumElemsPerConcat),
                    IsUndefMaskElt)) {
     N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0),
@@ -25578,7 +25586,7 @@ static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
 
   // If only one of N1,N2 is constant, bail out if it is not ALL_ZEROS as
   // discussed above.
-  if (!N1.isUndef()) {
+  if (!N1.isUndefOrPoison()) {
     if (!N1->hasOneUse())
       return SDValue();
 
@@ -25621,7 +25629,7 @@ static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
     // generating a splat; semantically, this is fine, but it's likely to
     // generate low-quality code if the target can't reconstruct an appropriate
     // shuffle.
-    if (!Op.isUndef() && !isIntOrFPConstant(Op))
+    if (!Op.isUndefOrPoison() && !isIntOrFPConstant(Op))
       if (!IsSplat && !DuplicateOps.insert(Op).second)
         return SDValue();
 
@@ -25636,10 +25644,11 @@ static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
       SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
   if (SVT != VT.getScalarType())
     for (SDValue &Op : Ops)
-      Op = Op.isUndef() ? DAG.getUNDEF(SVT)
-                        : (TLI.isZExtFree(Op.getValueType(), SVT)
-                               ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
-                               : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT));
+      Op = Op.isUndefOrPoison()
+               ? DAG.getUNDEF(SVT)
+               : (TLI.isZExtFree(Op.getValueType(), SVT)
+                      ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
+                      : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT));
   return DAG.getBuildVector(VT, SDLoc(SVN), Ops);
 }
 
@@ -25914,7 +25923,7 @@ static SDValue combineShuffleOfSplatVal(ShuffleVectorSDNode *Shuf,
   EVT VT = Shuf->getValueType(0);
   unsigned NumElts = VT.getVectorNumElements();
 
-  if (!Shuf->getOperand(1).isUndef())
+  if (!Shuf->getOperand(1).isUndefOrPoison())
     return SDValue();
 
   // See if this unary non-splat shuffle actually *is* a splat shuffle,
@@ -26021,11 +26030,11 @@ static SDValue combineShuffleOfBitcast(ShuffleVectorSDNode *SVN,
     return SDValue();
   EVT InVT = Op0.getOperand(0).getValueType();
   if (!InVT.isVector() ||
-      (!Op1.isUndef() && (Op1.getOpcode() != ISD::BITCAST ||
-                          Op1.getOperand(0).getValueType() != InVT)))
+      (!Op1.isUndefOrPoison() && (Op1.getOpcode() != ISD::BITCAST ||
+                                  Op1.getOperand(0).getValueType() != InVT)))
     return SDValue();
   if (isAnyConstantBuildVector(Op0.getOperand(0)) &&
-      (Op1.isUndef() || isAnyConstantBuildVector(Op1.getOperand(0))))
+      (Op1.isUndefOrPoison() || isAnyConstantBuildVector(Op1.getOperand(0))))
     return SDValue();
 
   int VTLanes = VT.getVectorNumElements();
@@ -26050,7 +26059,7 @@ static SDValue combineShuffleOfBitcast(ShuffleVectorSDNode *SVN,
   // original type.
   SDLoc DL(SVN);
   Op0 = Op0.getOperand(0);
-  Op1 = Op1.isUndef() ? DAG.getUNDEF(InVT) : Op1.getOperand(0);
+  Op1 = Op1.isUndefOrPoison() ? DAG.getUNDEF(InVT) : Op1.getOperand(0);
   SDValue NewShuf = DAG.getVectorShuffle(InVT, DL, Op0, Op1, NewMask);
   return DAG.getBitcast(VT, NewShuf);
 }
@@ -26059,10 +26068,10 @@ static SDValue combineShuffleOfBitcast(ShuffleVectorSDNode *SVN,
 /// shuf (shuf X, undef, InnerMask), undef, OuterMask --> splat X
 static SDValue formSplatFromShuffles(ShuffleVectorSDNode *OuterShuf,
                                      SelectionDAG &DAG) {
-  if (!OuterShuf->getOperand(1).isUndef())
+  if (!OuterShuf->getOperand(1).isUndefOrPoison())
     return SDValue();
   auto *InnerShuf = dyn_cast<ShuffleVectorSDNode>(OuterShuf->getOperand(0));
-  if (!InnerShuf || !InnerShuf->getOperand(1).isUndef())
+  if (!InnerShuf || !InnerShuf->getOperand(1).isUndefOrPoison())
     return SDValue();
 
   ArrayRef<int> OuterMask = OuterShuf->getMask();
@@ -26190,7 +26199,7 @@ static SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf,
 static SDValue simplifyShuffleOfShuffle(ShuffleVectorSDNode *Shuf) {
   // shuf (shuf0 X, Y, Mask0), undef, Mask
   auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Shuf->getOperand(0));
-  if (!Shuf0 || !Shuf->getOperand(1).isUndef())
+  if (!Shuf0 || !Shuf->getOperand(1).isUndefOrPoison())
     return SDValue();
 
   ArrayRef<int> Mask = Shuf->getMask();
@@ -26221,7 +26230,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
   assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");
 
   // Canonicalize shuffle undef, undef -> undef
-  if (N0.isUndef() && N1.isUndef())
+  if (N0.isUndefOrPoison() && N1.isUndefOrPoison())
     return DAG.getUNDEF(VT);
 
   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
@@ -26232,11 +26241,11 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
                                 createUnaryMask(SVN->getMask(), NumElts));
 
   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getCommutedVectorShuffle(*SVN);
 
   // Remove references to rhs if it is undef
-  if (N1.isUndef()) {
+  if (N1.isUndefOrPoison()) {
     bool Changed = false;
     SmallVector<int, 8> NewMask;
     for (unsigned i = 0; i != NumElts; ++i) {
@@ -26329,7 +26338,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
       SDValue Base;
       bool AllSame = true;
       for (unsigned i = 0; i != NumElts; ++i) {
-        if (!V->getOperand(i).isUndef()) {
+        if (!V->getOperand(i).isUndefOrPoison()) {
           Base = V->getOperand(i);
           break;
         }
@@ -26378,11 +26387,10 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
   if (SDValue V = combineTruncationShuffle(SVN, DAG))
     return V;
 
-  if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
-      Level < AfterLegalizeVectorOps &&
-      (N1.isUndef() ||
-      (N1.getOpcode() == ISD::CONCAT_VECTORS &&
-       N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
+  if (N0.getOpcode() == ISD::CONCAT_VECTORS && Level < AfterLegalizeVectorOps &&
+      (N1.isUndefOrPoison() ||
+       (N1.getOpcode() == ISD::CONCAT_VECTORS &&
+        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
     if (SDValue V = partitionShuffleOfConcats(N, DAG))
       return V;
   }
@@ -26390,9 +26398,8 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
   // A shuffle of a concat of the same narrow vector can be reduced to use
   // only low-half elements of a concat with undef:
   // shuf (concat X, X), undef, Mask --> shuf (concat X, undef), undef, Mask'
-  if (N0.getOpcode() == ISD::CONCAT_VECTORS && N1.isUndef() &&
-      N0.getNumOperands() == 2 &&
-      N0.getOperand(0) == N0.getOperand(1)) {
+  if (N0.getOpcode() == ISD::CONCAT_VECTORS && N1.isUndefOrPoison() &&
+      N0.getNumOperands() == 2 && N0.getOperand(0) == N0.getOperand(1)) {
     int HalfNumElts = (int)NumElts / 2;
     SmallVector<int, 8> NewMask;
     for (unsigned i = 0; i != NumElts; ++i) {
@@ -26539,7 +26546,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
   // attempt to merge the 2 shuffles and suitably bitcast the inputs/output
   // back to their original types.
   if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
-      N1.isUndef() && Level < AfterLegalizeVectorOps &&
+      N1.isUndefOrPoison() && Level < AfterLegalizeVectorOps &&
       TLI.isTypeLegal(VT)) {
 
     SDValue BC0 = peekThroughOneUseBitcasts(N0);
@@ -26641,7 +26648,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
       }
 
       // Simple case where 'CurrentVec' is UNDEF.
-      if (CurrentVec.isUndef()) {
+      if (CurrentVec.isUndefOrPoison()) {
         Mask.push_back(-1);
         continue;
       }
@@ -26675,7 +26682,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
         SDValue InnerVec = (InnerIdx < (int)NumElts)
                                ? CurrentSVN->getOperand(0)
                                : CurrentSVN->getOperand(1);
-        if (InnerVec.isUndef()) {
+        if (InnerVec.isUndefOrPoison()) {
           Mask.push_back(-1);
           continue;
         }
@@ -26727,7 +26734,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
       SDValue SV0 = N1->getOperand(0);
       SDValue SV1 = N1->getOperand(1);
       bool HasSameOp0 = N0 == SV0;
-      bool IsSV1Undef = SV1.isUndef();
+      bool IsSV1Undef = SV1.isUndefOrPoison();
       if (HasSameOp0 || IsSV1Undef || N0 == SV1)
         // Commute the operands of this shuffle so merging below will trigger.
         return DAG.getCommutedVectorShuffle(*SVN);
@@ -26779,13 +26786,13 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
     // shuffle(bop(shuffle(x,y),shuffle(z,w)),bop(shuffle(a,b),shuffle(c,d)))
     unsigned SrcOpcode = N0.getOpcode();
     if (TLI.isBinOp(SrcOpcode) && N->isOnlyUserOf(N0.getNode()) &&
-        (N1.isUndef() ||
+        (N1.isUndefOrPoison() ||
          (SrcOpcode == N1.getOpcode() && N->isOnlyUserOf(N1.getNode())))) {
       // Get binop source ops, or just pass on the undef.
       SDValue Op00 = N0.getOperand(0);
       SDValue Op01 = N0.getOperand(1);
-      SDValue Op10 = N1.isUndef() ? N1 : N1.getOperand(0);
-      SDValue Op11 = N1.isUndef() ? N1 : N1.getOperand(1);
+      SDValue Op10 = N1.isUndefOrPoison() ? N1 : N1.getOperand(0);
+      SDValue Op11 = N1.isUndefOrPoison() ? N1 : N1.getOperand(1);
       // TODO: We might be able to relax the VT check but we don't currently
       // have any isBinOp() that has different result/ops VTs so play safe until
       // we have test coverage.
@@ -26966,13 +26973,13 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
   uint64_t InsIdx = N->getConstantOperandVal(2);
 
   // If inserting an UNDEF, just return the original vector.
-  if (N1.isUndef())
+  if (N1.isUndefOrPoison())
     return N0;
 
   // If this is an insert of an extracted vector into an undef vector, we can
   // just use the input to the extract if the types match, and can simplify
   // in some cases even if they don't.
-  if (N0.isUndef() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+  if (N0.isUndefOrPoison() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
       N1.getOperand(1) == N2) {
     EVT SrcVT = N1.getOperand(0).getValueType();
     if (SrcVT == VT)
@@ -27000,7 +27007,7 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
 
   // Simplify scalar inserts into an undef vector:
   // insert_subvector undef, (splat X), N2 -> splat X
-  if (N0.isUndef() && N1.getOpcode() == ISD::SPLAT_VECTOR)
+  if (N0.isUndefOrPoison() && N1.getOpcode() == ISD::SPLAT_VECTOR)
     if (DAG.isConstantValueOfAnyType(N1.getOperand(0)) || N1.hasOneUse())
       return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, N1.getOperand(0));
 
@@ -27008,7 +27015,7 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
   // number of elements, just use the bitcast input of the extract.
   // i.e. INSERT_SUBVECTOR UNDEF (BITCAST N1) N2 ->
   //        BITCAST (INSERT_SUBVECTOR UNDEF N1 N2)
-  if (N0.isUndef() && N1.getOpcode() == ISD::BITCAST &&
+  if (N0.isUndefOrPoison() && N1.getOpcode() == ISD::BITCAST &&
       N1.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
       N1.getOperand(0).getOperand(1) == N2 &&
       N1.getOperand(0).getOperand(0).getValueType().getVectorElementCount() ==
@@ -27048,8 +27055,8 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
   // Eliminate an intermediate insert into an undef vector:
   // insert_subvector undef, (insert_subvector undef, X, 0), 0 -->
   // insert_subvector undef, X, 0
-  if (N0.isUndef() && N1.getOpcode() == ISD::INSERT_SUBVECTOR &&
-      N1.getOperand(0).isUndef() && isNullConstant(N1.getOperand(2)) &&
+  if (N0.isUndefOrPoison() && N1.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      N1.getOperand(0).isUndefOrPoison() && isNullConstant(N1.getOperand(2)) &&
       isNullConstant(N2))
     return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0,
                        N1.getOperand(1), N2);
@@ -27057,13 +27064,13 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
   // Push subvector bitcasts to the output, adjusting the index as we go.
   // insert_subvector(bitcast(v), bitcast(s), c1)
   // -> bitcast(insert_subvector(v, s, c2))
-  if ((N0.isUndef() || N0.getOpcode() == ISD::BITCAST) &&
+  if ((N0.isUndefOrPoison() || N0.getOpcode() == ISD::BITCAST) &&
       N1.getOpcode() == ISD::BITCAST) {
     SDValue N0Src = peekThroughBitcasts(N0);
     SDValue N1Src = peekThroughBitcasts(N1);
     EVT N0SrcSVT = N0Src.getValueType().getScalarType();
     EVT N1SrcSVT = N1Src.getValueType().getScalarType();
-    if ((N0.isUndef() || N0SrcSVT == N1SrcSVT) &&
+    if ((N0.isUndefOrPoison() || N0SrcSVT == N1SrcSVT) &&
         N0Src.getValueType().isVector() && N1Src.getValueType().isVector()) {
       EVT NewVT;
       SDLoc DL(N);
@@ -27209,9 +27216,9 @@ SDValue DAGCombiner::visitVECREDUCE(SDNode *N) {
     SDValue Vec = N0.getOperand(0);
     SDValue Subvec = N0.getOperand(1);
     if ((Opcode == ISD::VECREDUCE_OR &&
-         (N0.getOperand(0).isUndef() || isNullOrNullSplat(Vec))) ||
+         (N0.getOperand(0).isUndefOrPoison() || isNullOrNullSplat(Vec))) ||
         (Opcode == ISD::VECREDUCE_AND &&
-         (N0.getOperand(0).isUndef() || isAllOnesOrAllOnesSplat(Vec))))
+         (N0.getOperand(0).isUndefOrPoison() || isAllOnesOrAllOnesSplat(Vec))))
       return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), Subvec);
   }
 
@@ -27330,7 +27337,8 @@ SDValue DAGCombiner::visitGET_FPENV_MEM(SDNode *N) {
     return SDValue();
   }
   if (!LdNode || !LdNode->isSimple() || LdNode->isIndexed() ||
-      !LdNode->getOffset().isUndef() || LdNode->getMemoryVT() != MemVT ||
+      !LdNode->getOffset().isUndefOrPoison() ||
+      LdNode->getMemoryVT() != MemVT ||
       !LdNode->getChain().reachesChainWithoutSideEffects(SDValue(N, 0)))
     return SDValue();
 
@@ -27348,7 +27356,8 @@ SDValue DAGCombiner::visitGET_FPENV_MEM(SDNode *N) {
     }
   }
   if (!StNode || !StNode->isSimple() || StNode->isIndexed() ||
-      !StNode->getOffset().isUndef() || StNode->getMemoryVT() != MemVT ||
+      !StNode->getOffset().isUndefOrPoison() ||
+      StNode->getMemoryVT() != MemVT ||
       !StNode->getChain().reachesChainWithoutSideEffects(SDValue(LdNode, 1)))
     return SDValue();
 
@@ -27379,7 +27388,8 @@ SDValue DAGCombiner::visitSET_FPENV_MEM(SDNode *N) {
     return SDValue();
   }
   if (!StNode || !StNode->isSimple() || StNode->isIndexed() ||
-      !StNode->getOffset().isUndef() || StNode->getMemoryVT() != MemVT ||
+      !StNode->getOffset().isUndefOrPoison() ||
+      StNode->getMemoryVT() != MemVT ||
       !Chain.reachesChainWithoutSideEffects(SDValue(StNode, 0)))
     return SDValue();
 
@@ -27388,7 +27398,8 @@ SDValue DAGCombiner::visitSET_FPENV_MEM(SDNode *N) {
   SDValue StValue = StNode->getValue();
   auto *LdNode = dyn_cast<LoadSDNode>(StValue);
   if (!LdNode || !LdNode->isSimple() || LdNode->isIndexed() ||
-      !LdNode->getOffset().isUndef() || LdNode->getMemoryVT() != MemVT ||
+      !LdNode->getOffset().isUndefOrPoison() ||
+      LdNode->getMemoryVT() != MemVT ||
       !StNode->getChain().reachesChainWithoutSideEffects(SDValue(LdNode, 1)))
     return SDValue();
 
@@ -27437,7 +27448,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
       SDValue Elt = RHS.getOperand(EltIdx);
       // X & undef --> 0 (not undef). So this lane must be converted to choose
       // from the zero constant vector (same as if the element had all 0-bits).
-      if (Elt.isUndef()) {
+      if (Elt.isUndefOrPoison()) {
         Indices.push_back(i + NumSubElts);
         continue;
       }
@@ -27534,8 +27545,10 @@ static SDValue scalarizeBinOpOfSplats(SDNode *N, SelectionDAG &DAG,
   // If all lanes but 1 are undefined, no need to splat the scalar result.
   // TODO: Keep track of undefs and use that info in the general case.
   if (N0.getOpcode() == ISD::BUILD_VECTOR && N0.getOpcode() == N1.getOpcode() &&
-      count_if(N0->ops(), [](SDValue V) { return !V.isUndef(); }) == 1 &&
-      count_if(N1->ops(), [](SDValue V) { return !V.isUndef(); }) == 1) {
+      count_if(N0->ops(), [](SDValue V) { return !V.isUndefOrPoison(); }) ==
+          1 &&
+      count_if(N1->ops(), [](SDValue V) { return !V.isUndefOrPoison(); }) ==
+          1) {
     // bo (build_vec ..undef, X, undef...), (build_vec ..undef, Y, undef...) -->
     // build_vec ..undef, (bo X, Y), undef...
     SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), DAG.getUNDEF(EltVT));
@@ -27601,7 +27614,8 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
     auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(LHS);
     auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(RHS);
     if (Shuf0 && Shuf1 && Shuf0->getMask().equals(Shuf1->getMask()) &&
-        LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
+        LHS.getOperand(1).isUndefOrPoison() &&
+        RHS.getOperand(1).isUndefOrPoison() &&
         (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
       SDValue NewBinOp = DAG.getNode(Opcode, DL, VT, LHS.getOperand(0),
                                      RHS.getOperand(0), Flags);
@@ -27616,7 +27630,7 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
     // of an inserted scalar because that may be optimized better by
     // load-folding or other target-specific behaviors.
     if (isConstOrConstSplat(RHS) && Shuf0 && all_equal(Shuf0->getMask()) &&
-        Shuf0->hasOneUse() && Shuf0->getOperand(1).isUndef() &&
+        Shuf0->hasOneUse() && Shuf0->getOperand(1).isUndefOrPoison() &&
         Shuf0->getOperand(0).getOpcode() != ISD::INSERT_VECTOR_ELT) {
       // binop (splat X), (splat C) --> splat (binop X, C)
       SDValue X = Shuf0->getOperand(0);
@@ -27625,7 +27639,7 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
                                   Shuf0->getMask());
     }
     if (isConstOrConstSplat(LHS) && Shuf1 && all_equal(Shuf1->getMask()) &&
-        Shuf1->hasOneUse() && Shuf1->getOperand(1).isUndef() &&
+        Shuf1->hasOneUse() && Shuf1->getOperand(1).isUndefOrPoison() &&
         Shuf1->getOperand(0).getOpcode() != ISD::INSERT_VECTOR_ELT) {
       // binop (splat C), (splat X) --> splat (binop C, X)
       SDValue X = Shuf1->getOperand(0);
@@ -27639,8 +27653,10 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
   // the binary operation ahead of insertion may allow using a narrower vector
   // instruction that has better performance than the wide version of the op:
   // VBinOp (ins undef, X, Z), (ins undef, Y, Z) --> ins VecC, (VBinOp X, Y), Z
-  if (LHS.getOpcode() == ISD::INSERT_SUBVECTOR && LHS.getOperand(0).isUndef() &&
-      RHS.getOpcode() == ISD::INSERT_SUBVECTOR && RHS.getOperand(0).isUndef() &&
+  if (LHS.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      LHS.getOperand(0).isUndefOrPoison() &&
+      RHS.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      RHS.getOperand(0).isUndefOrPoison() &&
       LHS.getOperand(2) == RHS.getOperand(2) &&
       (LHS.hasOneUse() || RHS.hasOneUse())) {
     SDValue X = LHS.getOperand(1);
@@ -27662,7 +27678,7 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {
   auto ConcatWithConstantOrUndef = [](SDValue Concat) {
     return Concat.getOpcode() == ISD::CONCAT_VECTORS &&
            all_of(drop_begin(Concat->ops()), [](const SDValue &Op) {
-             return Op.isUndef() ||
+             return Op.isUndefOrPoison() ||
                     ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
            });
   };
@@ -29059,7 +29075,7 @@ bool DAGCombiner::parallelizeChainedStores(StoreSDNode *St) {
     return false;
 
   // Do not handle stores to undef base pointers.
-  if (BasePtr.getBase().isUndef())
+  if (BasePtr.getBase().isUndefOrPoison())
     return false;
 
   // Do not handle stores to opaque types
@@ -29171,7 +29187,7 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
     return false;
 
   // Do not handle stores to undef base pointers.
-  if (BasePtr.getBase().isUndef())
+  if (BasePtr.getBase().isUndefOrPoison())
     return false;
 
   // Directly improve a chain of disjoint stores starting at St.
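
Note on the predicate driving the DAGCombiner changes above: the isUndef()
-> isUndefOrPoison() migration presumably rests on a two-opcode check along
these lines (a sketch only; the real accessor is a member of SDValue
declared in SelectionDAGNodes.h and may differ in detail):

    // Sketch of the assumed predicate: true for either of the two
    // "no defined value" opcodes. ISD::POISON is the opcode this patch
    // series introduces alongside the pre-existing ISD::UNDEF.
    static bool isUndefOrPoison(SDValue V) {
      unsigned Opc = V.getOpcode();
      return Opc == ISD::UNDEF || Opc == ISD::POISON;
    }
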
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 7a84d695ccc5a57..fe45e0a7d794c99 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1564,7 +1564,8 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
   // Store (in the right endianness) the elements to memory.
   for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
     // Ignore undef elements.
-    if (Node->getOperand(i).isUndef()) continue;
+    if (Node->getOperand(i).isUndefOrPoison())
+      continue;
 
     unsigned Offset = TypeByteSize*i;
 
@@ -1885,7 +1886,7 @@ ExpandBVWithShuffles(SDNode *Node, SelectionDAG &DAG,
                                                               NewIntermedVals;
     for (unsigned i = 0; i < NumElems; ++i) {
       SDValue V = Node->getOperand(i);
-      if (V.isUndef())
+      if (V.isUndefOrPoison())
         continue;
 
       SDValue Vec;
@@ -1977,7 +1978,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
   bool isConstant = true;
   for (unsigned i = 0; i < NumElems; ++i) {
     SDValue V = Node->getOperand(i);
-    if (V.isUndef())
+    if (V.isUndefOrPoison())
       continue;
     if (i > 0)
       isOnlyLowElement = false;
@@ -2020,7 +2021,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
                                         CI->getZExtValue()));
         }
       } else {
-        assert(Node->getOperand(i).isUndef());
+        assert(Node->getOperand(i).isUndefOrPoison() && "Expect undef/poison");
         Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
         CV.push_back(UndefValue::get(OpNTy));
       }
@@ -2037,7 +2038,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
 
   SmallSet<SDValue, 16> DefinedValues;
   for (unsigned i = 0; i < NumElems; ++i) {
-    if (Node->getOperand(i).isUndef())
+    if (Node->getOperand(i).isUndefOrPoison())
       continue;
     DefinedValues.insert(Node->getOperand(i));
   }
@@ -2047,7 +2048,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
       SmallVector<int, 8> ShuffleVec(NumElems, -1);
       for (unsigned i = 0; i < NumElems; ++i) {
         SDValue V = Node->getOperand(i);
-        if (V.isUndef())
+        if (V.isUndefOrPoison())
           continue;
         ShuffleVec[i] = V == Value1 ? 0 : NumElems;
       }
@@ -4020,7 +4021,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
                          Node->getOperand(2));
     } else {
       // We test only the i1 bit.  Skip the AND if UNDEF or another AND.
-      if (Tmp2.isUndef() ||
+      if (Tmp2.isUndefOrPoison() ||
           (Tmp2.getOpcode() == ISD::AND && isOneConstant(Tmp2.getOperand(1))))
         Tmp3 = Tmp2;
       else
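
The LegalizeDAG hunks above make BUILD_VECTOR expansion skip POISON lanes
exactly as it already skipped UNDEF lanes. A node exercising these paths
could be built as follows (a minimal sketch; DAG, DL and the chosen types
are assumed to be in scope):

    // Sketch: <4 x i32> BUILD_VECTOR with two poison lanes. The expansion
    // code above ignores those lanes when storing elements to the stack
    // slot, just as it previously ignored UNDEF lanes.
    SDValue C0 = DAG.getConstant(1, DL, MVT::i32);
    SDValue C1 = DAG.getConstant(2, DL, MVT::i32);
    SDValue P  = DAG.getPoison(MVT::i32);
    SDValue BV = DAG.getBuildVector(MVT::v4i32, DL, {C0, P, C1, P});
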
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 1c28722f3f0a284..71f100bfa034343 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -164,7 +164,6 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
     case ISD::STRICT_UINT_TO_FP:
     case ISD::SINT_TO_FP:
     case ISD::UINT_TO_FP:  R = SoftenFloatRes_XINT_TO_FP(N); break;
-    case ISD::POISON:
     case ISD::UNDEF:       R = SoftenFloatRes_UNDEF(N); break;
     case ISD::VAARG:       R = SoftenFloatRes_VAARG(N); break;
     case ISD::VECREDUCE_FADD:
@@ -1475,7 +1474,6 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
     report_fatal_error("Do not know how to expand the result of this "
                        "operator!");
     // clang-format off
-  case ISD::POISON:
   case ISD::UNDEF:        SplitRes_UNDEF(N, Lo, Hi); break;
   case ISD::SELECT:       SplitRes_Select(N, Lo, Hi); break;
   case ISD::SELECT_CC:    SplitRes_SELECT_CC(N, Lo, Hi); break;
@@ -2785,7 +2783,6 @@ void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) {
 
     case ISD::SINT_TO_FP:
     case ISD::UINT_TO_FP: R = PromoteFloatRes_XINT_TO_FP(N); break;
-    case ISD::POISON:
     case ISD::UNDEF:      R = PromoteFloatRes_UNDEF(N); break;
     case ISD::ATOMIC_SWAP: R = BitcastToInt_ATOMIC_SWAP(N); break;
     case ISD::VECREDUCE_FADD:
@@ -3245,7 +3242,6 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
   case ISD::STRICT_UINT_TO_FP:
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:  R = SoftPromoteHalfRes_XINT_TO_FP(N); break;
-  case ISD::POISON:
   case ISD::UNDEF:       R = SoftPromoteHalfRes_UNDEF(N); break;
   case ISD::ATOMIC_SWAP: R = BitcastToInt_ATOMIC_SWAP(N); break;
   case ISD::VECREDUCE_FADD:
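
The LegalizeFloatTypes hunks above remove the case ISD::POISON: fall-throughs
(presumably added by the first patch in this series), so POISON no longer
reuses the UNDEF result handlers in this file. If POISON is instead meant to
be preserved distinctly through legalization, a helper along these lines
(hypothetical; not part of this patch) would keep the stronger poison
semantics rather than conservatively dropping to UNDEF:

    // Hypothetical helper: produce a "no defined value" node of type VT
    // that matches the kind of the source, keeping POISON as POISON
    // instead of weakening the result to UNDEF.
    static SDValue getUndefOrPoison(SelectionDAG &DAG, SDValue Src, EVT VT) {
      return Src.getOpcode() == ISD::POISON ? DAG.getPoison(VT)
                                            : DAG.getUNDEF(VT);
    }
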
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 37aad1e21f7bd2b..5d839be0fb2e16e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -657,7 +657,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) {
 SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
   // Figure out if the scalar is the LHS or RHS and return it.
   SDValue Arg = N->getOperand(2).getOperand(0);
-  if (Arg.isUndef())
+  if (Arg.isUndefOrPoison())
     return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
   unsigned Op = !cast<ConstantSDNode>(Arg)->isZero();
   return GetScalarizedVector(N->getOperand(Op));
@@ -2129,7 +2129,8 @@ void DAGTypeLegalizer::SplitVecRes_VP_LOAD(VPLoadSDNode *LD, SDValue &Lo,
   SDValue Ch = LD->getChain();
   SDValue Ptr = LD->getBasePtr();
   SDValue Offset = LD->getOffset();
-  assert(Offset.isUndef() && "Unexpected indexed variable-length load offset");
+  assert(Offset.isUndefOrPoison() &&
+         "Unexpected indexed variable-length load offset");
   Align Alignment = LD->getOriginalAlign();
   SDValue Mask = LD->getMask();
   SDValue EVL = LD->getVectorLength();
@@ -2203,7 +2204,7 @@ void DAGTypeLegalizer::SplitVecRes_VP_STRIDED_LOAD(VPStridedLoadSDNode *SLD,
                                                    SDValue &Lo, SDValue &Hi) {
   assert(SLD->isUnindexed() &&
          "Indexed VP strided load during type legalization!");
-  assert(SLD->getOffset().isUndef() &&
+  assert(SLD->getOffset().isUndefOrPoison() &&
          "Unexpected indexed variable-length load offset");
 
   SDLoc DL(SLD);
@@ -2290,7 +2291,7 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
   SDValue Ch = MLD->getChain();
   SDValue Ptr = MLD->getBasePtr();
   SDValue Offset = MLD->getOffset();
-  assert(Offset.isUndef() && "Unexpected indexed masked load offset");
+  assert(Offset.isUndefOrPoison() && "Unexpected indexed masked load offset");
   SDValue Mask = MLD->getMask();
   SDValue PassThru = MLD->getPassThru();
   Align Alignment = MLD->getOriginalAlign();
@@ -2517,7 +2518,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_COMPRESS(SDNode *N, SDValue &Lo,
                        MachinePointerInfo::getUnknownStack(MF));
 
   SDValue Compressed = DAG.getLoad(VecVT, DL, Chain, StackPtr, PtrInfo);
-  if (!Passthru.isUndef()) {
+  if (!Passthru.isUndefOrPoison()) {
     Compressed =
         DAG.getNode(ISD::VSELECT, DL, VecVT, Mask, Compressed, Passthru);
   }
@@ -2799,7 +2800,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
         if (Idx == PoisonMaskElem)
           continue;
         unsigned SrcRegIdx = Idx / NewElts;
-        if (Inputs[SrcRegIdx].isUndef()) {
+        if (Inputs[SrcRegIdx].isUndefOrPoison()) {
           Idx = PoisonMaskElem;
           continue;
         }
@@ -2831,7 +2832,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
       if (Idx == PoisonMaskElem)
         continue;
       unsigned SrcRegIdx = Idx / NewElts;
-      if (Inputs[SrcRegIdx].isUndef()) {
+      if (Inputs[SrcRegIdx].isUndefOrPoison()) {
         Idx = PoisonMaskElem;
         continue;
       }
@@ -2839,7 +2840,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
           getTypeAction(Inputs[SrcRegIdx].getValueType());
       if (Inputs[SrcRegIdx].getOpcode() == ISD::CONCAT_VECTORS &&
           Inputs[SrcRegIdx].getNumOperands() == 2 &&
-          !Inputs[SrcRegIdx].getOperand(1).isUndef() &&
+          !Inputs[SrcRegIdx].getOperand(1).isUndefOrPoison() &&
           (TypeAction == TargetLowering::TypeLegal ||
            TypeAction == TargetLowering::TypeWidenVector))
         UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2));
@@ -2897,11 +2898,11 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
         if (Shuffle->getOperand(0).getValueType() != NewVT)
           continue;
         int Op = -1;
-        if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
-            !Shuffle->isSplat()) {
+        if (!Inputs[I].hasOneUse() &&
+            Shuffle->getOperand(1).isUndefOrPoison() && !Shuffle->isSplat()) {
           Op = 0;
         } else if (!Inputs[I].hasOneUse() &&
-                   !Shuffle->getOperand(1).isUndef()) {
+                   !Shuffle->getOperand(1).isUndefOrPoison()) {
           // Find the only used operand, if possible.
           for (int &Idx : Mask) {
             if (Idx == PoisonMaskElem)
@@ -2928,7 +2929,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
         if (Op < 0) {
           // Try to check if one of the shuffle operands is used already.
           for (int OpIdx = 0; OpIdx < 2; ++OpIdx) {
-            if (Shuffle->getOperand(OpIdx).isUndef())
+            if (Shuffle->getOperand(OpIdx).isUndefOrPoison())
               continue;
             auto *It = find(Inputs, Shuffle->getOperand(OpIdx));
             if (It == std::end(Inputs))
@@ -2985,7 +2986,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
     for (const auto &I : Inputs) {
       if (IsConstant(I))
         UniqueConstantInputs.insert(I);
-      else if (!I.isUndef())
+      else if (!I.isUndefOrPoison())
         UniqueInputs.insert(I);
     }
     // Adjust mask in case of reused inputs. Also, need to insert constant
@@ -2998,7 +2999,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
         if (Idx == PoisonMaskElem)
           continue;
         unsigned SrcRegIdx = Idx / NewElts;
-        if (Inputs[SrcRegIdx].isUndef()) {
+        if (Inputs[SrcRegIdx].isUndefOrPoison()) {
           Idx = PoisonMaskElem;
           continue;
         }
@@ -3764,7 +3765,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
   SDValue Ch = N->getChain();
   SDValue Ptr = N->getBasePtr();
   SDValue Offset = N->getOffset();
-  assert(Offset.isUndef() && "Unexpected VP store offset");
+  assert(Offset.isUndefOrPoison() && "Unexpected VP store offset");
   SDValue Mask = N->getMask();
   SDValue EVL = N->getVectorLength();
   SDValue Data = N->getValue();
@@ -3841,7 +3842,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VP_STORE(VPStoreSDNode *N, unsigned OpNo) {
 SDValue DAGTypeLegalizer::SplitVecOp_VP_STRIDED_STORE(VPStridedStoreSDNode *N,
                                                       unsigned OpNo) {
   assert(N->isUnindexed() && "Indexed vp_strided_store of a vector?");
-  assert(N->getOffset().isUndef() && "Unexpected VP strided store offset");
+  assert(N->getOffset().isUndefOrPoison() &&
+         "Unexpected VP strided store offset");
 
   SDLoc DL(N);
 
@@ -3918,7 +3920,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_MSTORE(MaskedStoreSDNode *N,
   SDValue Ch  = N->getChain();
   SDValue Ptr = N->getBasePtr();
   SDValue Offset = N->getOffset();
-  assert(Offset.isUndef() && "Unexpected indexed masked store offset");
+  assert(Offset.isUndefOrPoison() && "Unexpected indexed masked store offset");
   SDValue Mask = N->getMask();
   SDValue Data = N->getValue();
   Align Alignment = N->getOriginalAlign();
@@ -5744,7 +5746,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
       // The inputs and the result are widen to the same value.
       unsigned i;
       for (i=1; i < NumOperands; ++i)
-        if (!N->getOperand(i).isUndef())
+        if (!N->getOperand(i).isUndefOrPoison())
           break;
 
       if (i == NumOperands)
@@ -6170,7 +6172,7 @@ static inline bool isSETCCorConvertedSETCC(SDValue N) {
     N = N.getOperand(0);
   else if (N.getOpcode() == ISD::CONCAT_VECTORS) {
     for (unsigned i = 1; i < N->getNumOperands(); ++i)
-      if (!N->getOperand(i)->isUndef())
+      if (!N->getOperand(i)->isUndefOrPoison())
         return false;
     N = N.getOperand(0);
   }
@@ -7011,7 +7013,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
   if (VT == TLI.getTypeToTransformTo(*DAG.getContext(), InVT)) {
     unsigned i;
     for (i = 1; i < NumOperands; ++i)
-      if (!N->getOperand(i).isUndef())
+      if (!N->getOperand(i).isUndefOrPoison())
         break;
 
     if (i == NumOperands)
@@ -7069,7 +7071,8 @@ SDValue DAGTypeLegalizer::WidenVecOp_INSERT_SUBVECTOR(SDNode *N) {
 
   // We need to make sure that the indices are still valid, otherwise we might
   // widen what was previously well-defined to something undefined.
-  if (IndicesValid && InVec.isUndef() && N->getConstantOperandVal(2) == 0)
+  if (IndicesValid && InVec.isUndefOrPoison() &&
+      N->getConstantOperandVal(2) == 0)
     return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, InVec, SubVec,
                        N->getOperand(2));
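
The offset asserts updated above all encode the same standing SelectionDAG
invariant: unindexed loads and stores carry an undefined placeholder as
their offset operand, and with this patch that placeholder may be either
UNDEF or POISON. Stated as a standalone check (a sketch of the invariant,
not code from this patch):

    // Invariant behind the assertions above: only indexed memory nodes
    // have a meaningful offset; unindexed ones hold an undef/poison
    // placeholder operand.
    LoadSDNode *LD = cast<LoadSDNode>(N);
    assert((!LD->isUnindexed() || LD->getOffset().isUndefOrPoison()) &&
           "unindexed loads carry an undef/poison offset placeholder");
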
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 6d62760d25dfeda..eb682e32a86e09e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -200,7 +200,7 @@ bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
   unsigned i = 0, e = N->getNumOperands();
 
   // Skip over all of the undef values.
-  while (i != e && N->getOperand(i).isUndef())
+  while (i != e && N->getOperand(i).isUndefOrPoison())
     ++i;
 
   // Do not accept an all-undef vector.
@@ -229,7 +229,7 @@ bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
   // undefs. Even with the above element type twiddling, this should be OK, as
   // the same type legalization should have applied to all the elements.
   for (++i; i != e; ++i)
-    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
+    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndefOrPoison())
       return false;
   return true;
 }
@@ -248,7 +248,7 @@ bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
 
   bool IsAllUndef = true;
   for (const SDValue &Op : N->op_values()) {
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
     IsAllUndef = false;
     // Do not accept build_vectors that aren't all constants or which have non-0
@@ -289,7 +289,7 @@ bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
     return false;
 
   for (const SDValue &Op : N->op_values()) {
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
     if (!isa<ConstantSDNode>(Op))
       return false;
@@ -302,7 +302,7 @@ bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
     return false;
 
   for (const SDValue &Op : N->op_values()) {
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
     if (!isa<ConstantFPSDNode>(Op))
       return false;
@@ -332,7 +332,7 @@ bool ISD::isVectorShrinkable(const SDNode *N, unsigned NewEltSize,
     return false;
 
   for (const SDValue &Op : N->op_values()) {
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
     if (!isa<ConstantSDNode>(Op))
       return false;
@@ -353,11 +353,12 @@ bool ISD::allOperandsUndef(const SDNode *N) {
   // is probably the desired behavior.
   if (N->getNumOperands() == 0)
     return false;
-  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
+  return all_of(N->op_values(),
+                [](SDValue Op) { return Op.isUndefOrPoison(); });
 }
 
 bool ISD::isFreezeUndef(const SDNode *N) {
-  return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef();
+  return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndefOrPoison();
 }
 
 template <typename ConstNodeType>
@@ -375,7 +376,7 @@ bool ISD::matchUnaryPredicateImpl(SDValue Op,
 
   EVT SVT = Op.getValueType().getScalarType();
   for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
-    if (AllowUndefs && Op.getOperand(i).isUndef()) {
+    if (AllowUndefs && Op.getOperand(i).isUndefOrPoison()) {
       if (!Match(nullptr))
         return false;
       continue;
@@ -415,8 +416,8 @@ bool ISD::matchBinaryPredicate(
   for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
     SDValue LHSOp = LHS.getOperand(i);
     SDValue RHSOp = RHS.getOperand(i);
-    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
-    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
+    bool LHSUndef = AllowUndefs && LHSOp.isUndefOrPoison();
+    bool RHSUndef = AllowUndefs && RHSOp.isUndefOrPoison();
     auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
     auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
     if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
@@ -2146,7 +2147,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
          "Invalid VECTOR_SHUFFLE");
 
   // Canonicalize shuffle undef, undef -> undef
-  if (N1.isUndef() && N2.isUndef())
+  if (N1.isUndefOrPoison() && N2.isUndefOrPoison())
     return getUNDEF(VT);
 
   // Validate that all indices in Mask are within the range of the elements
@@ -2167,7 +2168,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
   }
 
   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
-  if (N1.isUndef())
+  if (N1.isUndefOrPoison())
     commuteShuffle(N1, N2, MaskVec);
 
   if (TLI->hasVectorBlend()) {
@@ -2203,7 +2204,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
   // Canonicalize all index into lhs, -> shuffle lhs, undef
   // Canonicalize all index into rhs, -> shuffle rhs, undef
   bool AllLHS = true, AllRHS = true;
-  bool N2Undef = N2.isUndef();
+  bool N2Undef = N2.isUndefOrPoison();
   for (int i = 0; i != NElts; ++i) {
     if (MaskVec[i] >= NElts) {
       if (N2Undef)
@@ -2223,9 +2224,9 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
     commuteShuffle(N1, N2, MaskVec);
   }
   // Reset our undef status after accounting for the mask.
-  N2Undef = N2.isUndef();
+  N2Undef = N2.isUndefOrPoison();
   // Re-check whether both sides ended up undef.
-  if (N1.isUndef() && N2Undef)
+  if (N1.isUndefOrPoison() && N2Undef)
     return getUNDEF(VT);
 
   // If Identity shuffle return that node.
@@ -2251,7 +2252,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
       BitVector UndefElements;
       SDValue Splat = BV->getSplatValue(&UndefElements);
       // If this is a splat of an undef, shuffling it is also undef.
-      if (Splat && Splat.isUndef())
+      if (Splat && Splat.isUndefOrPoison())
         return getUNDEF(VT);
 
       bool SameNumElts =
@@ -2845,18 +2846,18 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
     // predicate pass or fail, so we can return undef.
     // Matches behavior in llvm::ConstantFoldCompareInstruction.
     // icmp eq/ne X, undef -> undef.
-    if ((N1.isUndef() || N2.isUndef()) &&
+    if ((N1.isUndefOrPoison() || N2.isUndefOrPoison()) &&
         (Cond == ISD::SETEQ || Cond == ISD::SETNE))
       return GetUndefBooleanConstant();
 
     // If both operands are undef, we can return undef for int comparison.
     // icmp undef, undef -> undef.
-    if (N1.isUndef() && N2.isUndef())
+    if (N1.isUndefOrPoison() && N2.isUndefOrPoison())
       return GetUndefBooleanConstant();
 
     // icmp X, X -> true/false
     // icmp X, undef -> true/false because undef could be X.
-    if (N1.isUndef() || N2.isUndef() || N1 == N2)
+    if (N1.isUndefOrPoison() || N2.isUndefOrPoison() || N1 == N2)
       return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
   }
 
@@ -2929,14 +2930,15 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
     case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
                                              OpVT);
     }
-  } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
+  } else if (N1CFP && OpVT.isSimple() && !N2.isUndefOrPoison()) {
     // Ensure that the constant occurs on the RHS.
     ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
     if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
       return SDValue();
     return getSetCC(dl, VT, N2, N1, SwappedCond);
   } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
-             (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
+             (OpVT.isFloatingPoint() &&
+              (N1.isUndefOrPoison() || N2.isUndefOrPoison()))) {
     // If an operand is known to be a nan (or undef that could be a nan), we can
     // fold it.
     // Choosing NaN for the undef will always make unordered comparison succeed
@@ -3038,7 +3040,7 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
   // vector types.
   switch (Opcode) {
   case ISD::SPLAT_VECTOR:
-    UndefElts = V.getOperand(0).isUndef()
+    UndefElts = V.getOperand(0).isUndefOrPoison()
                     ? APInt::getAllOnes(DemandedElts.getBitWidth())
                     : APInt(DemandedElts.getBitWidth(), 0);
     return true;
@@ -3084,7 +3086,7 @@ bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
     SDValue Scl;
     for (unsigned i = 0; i != NumElts; ++i) {
       SDValue Op = V.getOperand(i);
-      if (Op.isUndef()) {
+      if (Op.isUndefOrPoison()) {
         UndefElts.setBit(i);
         continue;
       }
@@ -6064,7 +6066,7 @@ static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
          "Incorrect element count in BUILD_VECTOR!");
 
   // BUILD_VECTOR of UNDEFs is UNDEF.
-  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
+  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndefOrPoison(); }))
     return DAG.getUNDEF(VT);
 
   // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity.
@@ -6106,7 +6108,7 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
     return Ops[0];
 
   // Concat of UNDEFs is UNDEF.
-  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
+  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndefOrPoison(); }))
     return DAG.getUNDEF(VT);
 
   // Scan the operands and look for extract operations from a single source
@@ -6145,7 +6147,7 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
   SmallVector<SDValue, 16> Elts;
   for (SDValue Op : Ops) {
     EVT OpVT = Op.getValueType();
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
     else if (Op.getOpcode() == ISD::BUILD_VECTOR)
       Elts.append(Op->op_begin(), Op->op_end());
@@ -6160,7 +6162,7 @@ static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
 
   if (SVT.bitsGT(VT.getScalarType())) {
     for (SDValue &Op : Elts) {
-      if (Op.isUndef())
+      if (Op.isUndefOrPoison())
         Op = DAG.getUNDEF(SVT);
       else
         Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
@@ -6279,18 +6281,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                                   N1.getValueType().getVectorElementCount()) &&
            "Vector element count mismatch!");
     assert(N1.getValueType().bitsLT(VT) && "Invalid fpext node, dst < src!");
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
     break;
   case ISD::FP_TO_SINT:
   case ISD::FP_TO_UINT:
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
     break;
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:
     // [us]itofp(undef) = 0, because the result value is bounded.
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getConstantFP(0.0, DL, VT);
     break;
   case ISD::SIGN_EXTEND:
@@ -6310,7 +6312,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
         Flags.setNonNeg(N1->getFlags().hasNonNeg());
       return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
     }
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       // sext(undef) = 0, because the top bits will all be the same.
       return getConstant(0, DL, VT);
 
@@ -6331,7 +6333,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       Flags.setNonNeg(N1->getFlags().hasNonNeg());
       return getNode(ISD::ZERO_EXTEND, DL, VT, N1.getOperand(0), Flags);
     }
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       // zext(undef) = 0, because the top bits will be zero.
       return getConstant(0, DL, VT);
 
@@ -6373,7 +6375,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
       return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
     }
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
 
     // (ext (trunc x)) -> x
@@ -6408,7 +6410,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
         return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
       return N1.getOperand(0);
     }
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
     if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes)
       return getVScale(DL, VT,
@@ -6426,14 +6428,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::ABS:
     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid ABS!");
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getConstant(0, DL, VT);
     break;
   case ISD::BSWAP:
     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BSWAP!");
     assert((VT.getScalarSizeInBits() % 16 == 0) &&
            "BSWAP types must be a multiple of 16 bits!");
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
     // bswap(bswap(X)) -> X.
     if (OpOpcode == ISD::BSWAP)
@@ -6441,7 +6443,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::BITREVERSE:
     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BITREVERSE!");
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
     break;
   case ISD::BITCAST:
@@ -6450,7 +6452,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     if (VT == N1.getValueType()) return N1;   // noop conversion.
     if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
       return getNode(ISD::BITCAST, DL, VT, N1.getOperand(0));
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
     break;
   case ISD::SCALAR_TO_VECTOR:
@@ -6460,7 +6462,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
              N1.getValueType().isInteger() &&
              VT.getVectorElementType().bitsLE(N1.getValueType()))) &&
            "Illegal SCALAR_TO_VECTOR node!");
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
@@ -6471,7 +6473,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::FNEG:
     // Negation of an unknown bag of bits is still completely undefined.
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
 
     if (OpOpcode == ISD::FNEG) // --X -> X
@@ -6652,13 +6654,13 @@ bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
     // zero/undef, the whole op is undef.
     assert(Ops.size() == 2 && "Div/rem should have 2 operands");
     SDValue Divisor = Ops[1];
-    if (Divisor.isUndef() || isNullConstant(Divisor))
+    if (Divisor.isUndefOrPoison() || isNullConstant(Divisor))
       return true;
 
     return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
-           llvm::any_of(Divisor->op_values(),
-                        [](SDValue V) { return V.isUndef() ||
-                                        isNullConstant(V); });
+           llvm::any_of(Divisor->op_values(), [](SDValue V) {
+             return V.isUndefOrPoison() || isNullConstant(V);
+           });
     // TODO: Handle signed overflow.
   }
   // TODO: Handle oversized shifts.
@@ -6900,7 +6902,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
         llvm::EVT OpVT = Ops[0].getOperand(0).getValueType();
         for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I) {
           SDValue Op = Ops[0].getOperand(I);
-          if (Op.isUndef()) {
+          if (Op.isUndefOrPoison()) {
             ScalarOps.push_back(getUNDEF(OpVT));
             continue;
           }
@@ -6997,7 +6999,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
   };
 
   auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
-    return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
+    return Op.isUndefOrPoison() || Op.getOpcode() == ISD::CONDCODE ||
            Op.getOpcode() == ISD::BUILD_VECTOR ||
            Op.getOpcode() == ISD::SPLAT_VECTOR;
   };
@@ -7040,7 +7042,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
       EVT InSVT = Op.getValueType().getScalarType();
       if (Op.getOpcode() != ISD::BUILD_VECTOR &&
           Op.getOpcode() != ISD::SPLAT_VECTOR) {
-        if (Op.isUndef())
+        if (Op.isUndefOrPoison())
           ScalarOps.push_back(getUNDEF(InSVT));
         else
           ScalarOps.push_back(Op);
@@ -7058,7 +7060,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
         // - if we fail to constant fold we can't guarantee the (dead) nodes
         // we're creating will be cleaned up before being visited for
         // legalization.
-        if (NewNodesMustHaveLegalTypes && !ScalarOp.isUndef() &&
+        if (NewNodesMustHaveLegalTypes && !ScalarOp.isUndefOrPoison() &&
             !isa<ConstantSDNode>(ScalarOp) &&
             TLI->getTypeAction(*getContext(), InSVT) !=
                 TargetLowering::TypeLegal)
@@ -7073,7 +7075,8 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
     SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
 
     // Scalar folding only succeeded if the result is a constant or UNDEF.
-    if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
+    if (!ScalarResult.isUndefOrPoison() &&
+        ScalarResult.getOpcode() != ISD::Constant &&
         ScalarResult.getOpcode() != ISD::ConstantFP)
       return SDValue();
 
@@ -7157,7 +7160,7 @@ SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
   case ISD::FSUB:
     // -0.0 - undef --> undef (consistent with "fneg undef")
     if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
-      if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
+      if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndefOrPoison())
         return getUNDEF(VT);
     [[fallthrough]];
 
@@ -7167,9 +7170,9 @@ SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
   case ISD::FREM:
     // If both operands are undef, the result is undef. If 1 operand is undef,
     // the result is NaN. This should match the behavior of the IR optimizer.
-    if (N1.isUndef() && N2.isUndef())
+    if (N1.isUndefOrPoison() && N2.isUndefOrPoison())
       return getUNDEF(VT);
-    if (N1.isUndef() || N2.isUndef())
+    if (N1.isUndefOrPoison() || N2.isUndefOrPoison())
       return getConstantFP(APFloat::getNaN(VT.getFltSemantics()), DL, VT);
   }
   return SDValue();
@@ -7485,7 +7488,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
              element type of the vector.");
 
     // Extract from an undefined value or using an undefined index is undefined.
-    if (N1.isUndef() || N2.isUndef())
+    if (N1.isUndefOrPoison() || N2.isUndefOrPoison())
       return getUNDEF(VT);
 
     // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
@@ -7612,7 +7615,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       return N1;
 
     // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
-    if (N1.isUndef())
+    if (N1.isUndefOrPoison())
       return getUNDEF(VT);
 
     // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
@@ -7637,7 +7640,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     return SV;
 
   // Canonicalize an UNDEF to the RHS, even over a constant.
-  if (N1.isUndef()) {
+  if (N1.isUndefOrPoison()) {
     if (TLI->isCommutativeBinOp(Opcode)) {
       std::swap(N1, N2);
     } else {
@@ -7657,10 +7660,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
   }
 
   // Fold a bunch of operators when the RHS is undef.
-  if (N2.isUndef()) {
+  if (N2.isUndefOrPoison()) {
     switch (Opcode) {
     case ISD::XOR:
-      if (N1.isUndef())
+      if (N1.isUndefOrPoison())
         // Handle undef ^ undef -> 0 special case. This is a common
         // idiom (misuse).
         return getConstant(0, DL, VT);
@@ -7805,18 +7808,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       return getUNDEF(VT);
 
     // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
-    if (N3.isUndef())
+    if (N3.isUndefOrPoison())
       return getUNDEF(VT);
 
     // If the inserted element is an UNDEF, just use the input vector.
-    if (N2.isUndef())
+    if (N2.isUndefOrPoison())
       return N1;
 
     break;
   }
   case ISD::INSERT_SUBVECTOR: {
     // Inserting undef into undef is still undef.
-    if (N1.isUndef() && N2.isUndef())
+    if (N1.isUndefOrPoison() && N2.isUndefOrPoison())
       return getUNDEF(VT);
 
     EVT N2VT = N2.getValueType();
@@ -7847,7 +7850,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
 
     // If this is an insert of an extracted vector into an undef vector, we
     // can just use the input to the extract.
-    if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+    if (N1.isUndefOrPoison() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
         N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
       return N2.getOperand(0);
     break;
@@ -7875,7 +7878,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     assert(VecVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
            "Vector and mask must have same number of elements.");
 
-    if (N1.isUndef() || N2.isUndef())
+    if (N1.isUndefOrPoison() || N2.isUndefOrPoison())
       return N3;
 
     break;
@@ -7966,7 +7969,7 @@ SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
 /// operand.
 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
                               const SDLoc &dl) {
-  assert(!Value.isUndef());
+  assert(!Value.isUndefOrPoison() && "Unexpected undef/poison memset value");
 
   unsigned NumBits = VT.getScalarSizeInBits();
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
@@ -8133,7 +8136,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        const AAMDNodes &AAInfo, AAResults *AA) {
   // Turn a memcpy of undef to nop.
   // FIXME: We need to honor volatile even if Src is undef.
-  if (Src.isUndef())
+  if (Src.isUndefOrPoison())
     return Chain;
 
   // Expand memcpy to a series of load and store ops if the size operand falls
@@ -8336,7 +8339,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                         const AAMDNodes &AAInfo) {
   // Turn a memmove of undef to nop.
   // FIXME: We need to honor volatile even if Src is undef.
-  if (Src.isUndef())
+  if (Src.isUndefOrPoison())
     return Chain;
 
   // Expand memmove to a series of load and store ops if the size operand falls
@@ -8459,7 +8462,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
                                const AAMDNodes &AAInfo) {
   // Turn a memset of undef to nop.
   // FIXME: We need to honor volatile even if Src is undef.
-  if (Src.isUndef())
+  if (Src.isUndefOrPoison())
     return Chain;
 
   // Expand memset to a series of load/store ops if the size operand
@@ -9186,7 +9189,7 @@ static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
   // If the 'Offset' value isn't a constant, we can't handle this.
   if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
     return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
-  if (OffsetOp.isUndef())
+  if (OffsetOp.isUndefOrPoison())
     return InferPointerInfo(Info, DAG, Ptr);
   return Info;
 }
@@ -9237,7 +9240,8 @@ SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
   }
 
   bool Indexed = AM != ISD::UNINDEXED;
-  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
+  assert((Indexed || Offset.isUndefOrPoison()) &&
+         "Unindexed load with an offset!");
 
   SDVTList VTs = Indexed ?
     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
@@ -9310,7 +9314,8 @@ SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
-  assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
+  assert(LD->getOffset().isUndefOrPoison() &&
+         "Load is already a indexed load!");
   // Don't propagate the invariant or dereferenceable flags.
   auto MMOFlags =
       LD->getMemOperand()->getFlags() &
@@ -9442,7 +9447,8 @@ SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                       SDValue Base, SDValue Offset,
                                       ISD::MemIndexedMode AM) {
   StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
-  assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
+  assert(ST->getOffset().isUndefOrPoison() &&
+         "Store is already a indexed store!");
   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
   FoldingSetNodeID ID;
@@ -9497,7 +9503,8 @@ SDValue SelectionDAG::getLoadVP(ISD::MemIndexedMode AM,
                                 EVT MemVT, MachineMemOperand *MMO,
                                 bool IsExpanding) {
   bool Indexed = AM != ISD::UNINDEXED;
-  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
+  assert((Indexed || Offset.isUndefOrPoison()) &&
+         "Unindexed load with an offset!");
 
   SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
                          : getVTList(VT, MVT::Other);
@@ -9572,7 +9579,8 @@ SDValue SelectionDAG::getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl,
                                        SDValue Base, SDValue Offset,
                                        ISD::MemIndexedMode AM) {
   auto *LD = cast<VPLoadSDNode>(OrigLoad);
-  assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
+  assert(LD->getOffset().isUndefOrPoison() &&
+         "Load is already a indexed load!");
   // Don't propagate the invariant or dereferenceable flags.
   auto MMOFlags =
       LD->getMemOperand()->getFlags() &
@@ -9591,7 +9599,8 @@ SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
                                  bool IsCompressing) {
   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
   bool Indexed = AM != ISD::UNINDEXED;
-  assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
+  assert((Indexed || Offset.isUndefOrPoison()) &&
+         "Unindexed vp_store with an offset!");
   SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
                          : getVTList(MVT::Other);
   SDValue Ops[] = {Chain, Val, Ptr, Offset, Mask, EVL};
@@ -9694,7 +9703,8 @@ SDValue SelectionDAG::getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl,
                                         SDValue Base, SDValue Offset,
                                         ISD::MemIndexedMode AM) {
   auto *ST = cast<VPStoreSDNode>(OrigStore);
-  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
+  assert(ST->getOffset().isUndefOrPoison() &&
+         "Store is already an indexed store!");
   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
   SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
                    Offset,         ST->getMask(),  ST->getVectorLength()};
@@ -9725,7 +9735,8 @@ SDValue SelectionDAG::getStridedLoadVP(
     SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
     SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding) {
   bool Indexed = AM != ISD::UNINDEXED;
-  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
+  assert((Indexed || Offset.isUndefOrPoison()) &&
+         "Unindexed load with an offset!");
 
   SDValue Ops[] = {Chain, Ptr, Offset, Stride, Mask, EVL};
   SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
@@ -9782,7 +9793,8 @@ SDValue SelectionDAG::getStridedStoreVP(SDValue Chain, const SDLoc &DL,
                                         bool IsTruncating, bool IsCompressing) {
   assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
   bool Indexed = AM != ISD::UNINDEXED;
-  assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
+  assert((Indexed || Offset.isUndefOrPoison()) &&
+         "Unindexed vp_store with an offset!");
   SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
                          : getVTList(MVT::Other);
   SDValue Ops[] = {Chain, Val, Ptr, Offset, Stride, Mask, EVL};
@@ -9952,7 +9964,7 @@ SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                     ISD::MemIndexedMode AM,
                                     ISD::LoadExtType ExtTy, bool isExpanding) {
   bool Indexed = AM != ISD::UNINDEXED;
-  assert((Indexed || Offset.isUndef()) &&
+  assert((Indexed || Offset.isUndefOrPoison()) &&
          "Unindexed masked load with an offset!");
   SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
                          : getVTList(VT, MVT::Other);
@@ -9984,7 +9996,8 @@ SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
                                            SDValue Base, SDValue Offset,
                                            ISD::MemIndexedMode AM) {
   MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
-  assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!");
+  assert(LD->getOffset().isUndefOrPoison() &&
+         "Masked load is already a indexed load!");
   return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
                        Offset, LD->getMask(), LD->getPassThru(),
                        LD->getMemoryVT(), LD->getMemOperand(), AM,
@@ -10000,7 +10013,7 @@ SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
   assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
   bool Indexed = AM != ISD::UNINDEXED;
-  assert((Indexed || Offset.isUndef()) &&
+  assert((Indexed || Offset.isUndefOrPoison()) &&
          "Unindexed masked store with an offset!");
   SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
                          : getVTList(MVT::Other);
@@ -10033,7 +10046,7 @@ SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
                                             SDValue Base, SDValue Offset,
                                             ISD::MemIndexedMode AM) {
   MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
-  assert(ST->getOffset().isUndef() &&
-         "Masked store is already a indexed store!");
+  assert(ST->getOffset().isUndefOrPoison() &&
+         "Masked store is already an indexed store!");
   return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
                         ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
@@ -10229,11 +10242,11 @@ SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
   // select undef, T, F --> T (if T is a constant), otherwise F
   // select ?, undef, F --> F
   // select ?, T, undef --> T
-  if (Cond.isUndef())
+  if (Cond.isUndefOrPoison())
     return isConstantValueOfAnyType(T) ? T : F;
-  if (T.isUndef())
+  if (T.isUndefOrPoison())
     return F;
-  if (F.isUndef())
+  if (F.isUndefOrPoison())
     return T;
 
   // select true, T, F --> T
@@ -10250,10 +10263,10 @@ SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
 
 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
   // shift undef, Y --> 0 (can always assume that the undef value is 0)
-  if (X.isUndef())
+  if (X.isUndefOrPoison())
     return getConstant(0, SDLoc(X.getNode()), X.getValueType());
   // shift X, undef --> undef (because it may shift by the bitwidth)
-  if (Y.isUndef())
+  if (Y.isUndefOrPoison())
     return getUNDEF(X.getValueType());
 
   // shift 0, Y --> 0
@@ -10288,10 +10301,12 @@ SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
   bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
                 (YC && YC->getValueAPF().isInfinity());
 
-  if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
+  if (Flags.hasNoNaNs() &&
+      (HasNan || X.isUndefOrPoison() || Y.isUndefOrPoison()))
     return getUNDEF(X.getValueType());
 
-  if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
+  if (Flags.hasNoInfs() &&
+      (HasInf || X.isUndefOrPoison() || Y.isUndefOrPoison()))
     return getUNDEF(X.getValueType());
 
   if (!YC)
@@ -12211,7 +12226,7 @@ bool llvm::isNullConstant(SDValue V) {
 }
 
 bool llvm::isNullConstantOrUndef(SDValue V) {
-  return V.isUndef() || isNullConstant(V);
+  return V.isUndefOrPoison() || isNullConstant(V);
 }
 
 bool llvm::isNullFPConstant(SDValue V) {
@@ -13109,7 +13124,7 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
     SDValue OpVal = getOperand(i);
     unsigned BitPos = j * EltWidth;
 
-    if (OpVal.isUndef())
+    if (OpVal.isUndefOrPoison())
       SplatUndef.setBits(BitPos, BitPos + EltWidth);
     else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
       SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
@@ -13171,7 +13186,7 @@ SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
     if (!DemandedElts[i])
       continue;
     SDValue Op = getOperand(i);
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       if (UndefElements)
         (*UndefElements)[i] = true;
     } else if (!Splatted) {
@@ -13183,7 +13198,7 @@ SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
 
   if (!Splatted) {
     unsigned FirstDemandedIdx = DemandedElts.countr_zero();
-    assert(getOperand(FirstDemandedIdx).isUndef() &&
+    assert(getOperand(FirstDemandedIdx).isUndefOrPoison() &&
            "Can only have a splat without a constant for all undefs.");
     return getOperand(FirstDemandedIdx);
   }
@@ -13212,7 +13227,7 @@ bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
   // Set the undefs even if we don't find a sequence (like getSplatValue).
   if (UndefElements)
     for (unsigned I = 0; I != NumOps; ++I)
-      if (DemandedElts[I] && getOperand(I).isUndef())
+      if (DemandedElts[I] && getOperand(I).isUndefOrPoison())
         (*UndefElements)[I] = true;
 
   // Iteratively widen the sequence length looking for repetitions.
@@ -13223,12 +13238,12 @@ bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
         continue;
       SDValue &SeqOp = Sequence[I % SeqLen];
       SDValue Op = getOperand(I);
-      if (Op.isUndef()) {
+      if (Op.isUndefOrPoison()) {
         if (!SeqOp)
           SeqOp = Op;
         continue;
       }
-      if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
+      if (SeqOp && !SeqOp.isUndefOrPoison() && SeqOp != Op) {
         Sequence.clear();
         break;
       }
@@ -13309,7 +13324,7 @@ bool BuildVectorSDNode::getConstantRawBits(
 
   for (unsigned I = 0; I != NumSrcOps; ++I) {
     SDValue Op = getOperand(I);
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       SrcUndeElements.set(I);
       continue;
     }
@@ -13383,7 +13398,7 @@ void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
 bool BuildVectorSDNode::isConstant() const {
   for (const SDValue &Op : op_values()) {
     unsigned Opc = Op.getOpcode();
-    if (!Op.isUndef() && Opc != ISD::Constant && Opc != ISD::ConstantFP)
+    if (!Op.isUndefOrPoison() && Opc != ISD::Constant && Opc != ISD::ConstantFP)
       return false;
   }
   return true;
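
A note on the mechanical isUndef() -> isUndefOrPoison() rewrites above: they all go through the new predicate added next to isUndef() in SelectionDAGNodes.h (that header hunk is not part of this excerpt). A minimal sketch of the intended semantics, assuming ISD::POISON is the only opcode this patch introduces:

  // Sketch only; the exact definition lives in SelectionDAGNodes.h and may
  // be spelled differently in the patch.
  bool SDNode::isUndefOrPoison() const {
    return getOpcode() == ISD::UNDEF || getOpcode() == ISD::POISON;
  }
  // SDValue forwards to its node, mirroring the existing isUndef() wrapper.
  bool SDValue::isUndefOrPoison() const { return Node->isUndefOrPoison(); }

With that reading, every fold above that was valid for an undef operand stays valid for poison, since poison can be refined to any value. For example, assuming a SelectionDAG DAG and an SDLoc DL in scope:

  // getNode() routes shifts through simplifyShift(), so a poison shifted
  // value now constant-folds to 0 exactly as an undef one always has.
  SDValue Poison = DAG.getPoison(MVT::i32);
  SDValue Amt = DAG.getConstant(3, DL, MVT::i32);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, MVT::i32, Poison, Amt);
  assert(isNullConstant(Shl) && "poison << 3 folds like undef << 3");
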
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index a838003c34dfb02..81bef5ae163b106 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -257,7 +257,7 @@ static bool willLowerDirectly(SDValue Incoming) {
   if (Incoming.getValueType().getSizeInBits() > 64)
     return false;
 
-  return isIntOrFPConstant(Incoming) || Incoming.isUndef();
+  return isIntOrFPConstant(Incoming) || Incoming.isUndefOrPoison();
 }
 
 /// Try to find existing copies of the incoming values in stack slots used for
@@ -443,8 +443,8 @@ lowerIncomingStatepointValue(SDValue Incoming, bool RequireSpillSlot,
     }
 
     assert(Incoming.getValueType().getSizeInBits() <= 64);
-    
-    if (Incoming.isUndef()) {
+
+    if (Incoming.isUndefOrPoison()) {
       // Put an easily recognized constant that's unlikely to be a valid
       // value so that uses of undef by the consumer of the stackmap are
       // easily recognized. This is legal since the compiler is always
@@ -1286,7 +1286,7 @@ void SelectionDAGBuilder::visitGCRelocate(const GCRelocateInst &Relocate) {
   assert(Record.type == RecordType::NoRelocate);
   SDValue SD = getValue(DerivedPtr);
 
-  if (SD.isUndef() && SD.getValueType().getSizeInBits() <= 64) {
+  if (SD.isUndefOrPoison() && SD.getValueType().getSizeInBits() <= 64) {
     // Lowering relocate(undef) as an arbitrary constant. Current constant value
     // is chosen such that it's unlikely to be a valid pointer.
     setValue(&Relocate, DAG.getConstant(0xFEFEFEFE, SDLoc(SD), MVT::i64));
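
The statepoint changes make relocates of poison pointers take the same path as undef: they lower to the easily recognized 0xFEFEFEFE sentinel instead of occupying a spill slot. A hedged illustration (variable names are hypothetical):

  // Both values below satisfy the sentinel-path checks in
  // lowerIncomingStatepointValue and visitGCRelocate after this patch.
  SDValue U = DAG.getUNDEF(MVT::i64);   // previously handled
  SDValue P = DAG.getPoison(MVT::i64);  // newly handled via ISD::POISON
  assert(U.isUndefOrPoison() && P.isUndefOrPoison());
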
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 56194e2614af2dd..8274c448d87d3ab 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -681,7 +681,7 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
     return SDValue();
 
   // Ignore UNDEFs.
-  if (Op.isUndef())
+  if (Op.isUndefOrPoison())
     return SDValue();
 
   // Not demanding any bits/elts from Op.
@@ -1152,7 +1152,7 @@ bool TargetLowering::SimplifyDemandedBits(
   SDLoc dl(Op);
 
   // Undef operand.
-  if (Op.isUndef())
+  if (Op.isUndefOrPoison())
     return false;
 
   // We can't simplify target constants.
@@ -1454,7 +1454,7 @@ bool TargetLowering::SimplifyDemandedBits(
     // AND(INSERT_SUBVECTOR(C,X,I),M) -> INSERT_SUBVECTOR(AND(C,M),X,I)
     // iff 'C' is Undef/Constant and AND(X,M) == X (for DemandedBits).
     if (Op0.getOpcode() == ISD::INSERT_SUBVECTOR && !VT.isScalableVector() &&
-        (Op0.getOperand(0).isUndef() ||
+        (Op0.getOperand(0).isUndefOrPoison() ||
          ISD::isBuildVectorOfConstantSDNodes(Op0.getOperand(0).getNode())) &&
         Op0->hasOneUse()) {
       unsigned NumSubElts =
@@ -3049,7 +3049,8 @@ static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
       // nodes. Ignore opaque integers because they do not constant fold.
       SDValue Elt = BV->getOperand(Index);
       auto *C = dyn_cast<ConstantSDNode>(Elt);
-      if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque()))
+      if (isa<ConstantFPSDNode>(Elt) || Elt.isUndefOrPoison() ||
+          (C && !C->isOpaque()))
         return Elt;
     }
 
@@ -3067,7 +3068,8 @@ static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG,
     SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0);
     SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1);
     if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT)
-      if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef())
+      if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1)
+              .isUndefOrPoison())
         KnownUndef.setBit(i);
   }
   return KnownUndef;
@@ -3096,7 +3098,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
          "Mask size mismatches value type element count!");
 
   // Undef operand.
-  if (Op.isUndef()) {
+  if (Op.isUndefOrPoison()) {
     KnownUndef.setAllBits();
     return false;
   }
@@ -3282,7 +3284,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
         SmallVector<SDValue, 32> Ops(Op->ops());
         bool Updated = false;
         for (unsigned i = 0; i != NumElts; ++i) {
-          if (!DemandedElts[i] && !Ops[i].isUndef()) {
+          if (!DemandedElts[i] && !Ops[i].isUndefOrPoison()) {
             Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType());
             KnownUndef.setBit(i);
             Updated = true;
@@ -3294,7 +3296,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
     }
     for (unsigned i = 0; i != NumElts; ++i) {
       SDValue SrcOp = Op.getOperand(i);
-      if (SrcOp.isUndef()) {
+      if (SrcOp.isUndefOrPoison()) {
         KnownUndef.setBit(i);
       } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() &&
                  (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) {
@@ -3355,7 +3357,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
       return true;
 
     // If none of the src operand elements are demanded, replace it with undef.
-    if (!DemandedSrcElts && !Src.isUndef())
+    if (!DemandedSrcElts && !Src.isUndefOrPoison())
       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                                                TLO.DAG.getUNDEF(VT), Sub,
                                                Op.getOperand(2)));
@@ -3428,7 +3430,7 @@ bool TargetLowering::SimplifyDemandedVectorElts(
                                      KnownZero, TLO, Depth + 1))
         return true;
 
-      KnownUndef.setBitVal(Idx, Scl.isUndef());
+      KnownUndef.setBitVal(Idx, Scl.isUndefOrPoison());
 
       KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl));
       break;
@@ -7358,7 +7360,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
   case ISD::BUILD_VECTOR: {
     // Only permit BUILD_VECTOR of constants.
     if (llvm::any_of(Op->op_values(), [&](SDValue N) {
-          return !N.isUndef() && !isa<ConstantFPSDNode>(N);
+          return !N.isUndefOrPoison() && !isa<ConstantFPSDNode>(N);
         }))
       break;
 
@@ -7366,7 +7368,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
         (isOperationLegal(ISD::ConstantFP, VT) &&
          isOperationLegal(ISD::BUILD_VECTOR, VT)) ||
         llvm::all_of(Op->op_values(), [&](SDValue N) {
-          return N.isUndef() ||
+          return N.isUndefOrPoison() ||
                  isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
                               OptForSize);
         });
@@ -7376,7 +7378,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
 
     SmallVector<SDValue, 4> Ops;
     for (SDValue C : Op->op_values()) {
-      if (C.isUndef()) {
+      if (C.isUndefOrPoison()) {
         Ops.push_back(C);
         continue;
       }
@@ -11767,7 +11769,7 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,
   SDValue Chain = DAG.getEntryNode();
   SDValue OutPos = DAG.getConstant(0, DL, PositionVT);
 
-  bool HasPassthru = !Passthru.isUndef();
+  bool HasPassthru = !Passthru.isUndefOrPoison();
 
   // If we have a passthru vector, store it on the stack, overwrite the matching
   // positions and then re-write the last element that was potentially
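
For the demanded bits/elements hooks, the net effect is that a poison operand is treated as maximally flexible, exactly like undef. A sketch of what that means for SimplifyDemandedVectorElts (assuming a SelectionDAG DAG in scope; names are illustrative):

  // A POISON vector reports every lane as known-undef and the hook performs
  // no further simplification, mirroring the existing UNDEF short-circuit:
  //   if (Op.isUndefOrPoison()) { KnownUndef.setAllBits(); return false; }
  SDValue PoisonVec = DAG.getPoison(MVT::v4i32);
  APInt Demanded = APInt::getAllOnes(4); // all four lanes demanded
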
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 278dd95cd969d81..c5e3def58e733ce 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6659,7 +6659,7 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
 
   // SVE supports zero (and so undef) passthrough values only, everything else
   // must be handled manually by an explicit select on the load's output.
-  if (!PassThru->isUndef() && !isZerosVector(PassThru.getNode())) {
+  if (!PassThru->isUndefOrPoison() && !isZerosVector(PassThru.getNode())) {
     SDValue Ops[] = {Chain, DAG.getUNDEF(VT), Mask, BasePtr, Index, Scale};
     SDValue Load =
         DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
@@ -6718,8 +6718,9 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
     MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
     Index = convertToScalableVector(DAG, ContainerVT, Index);
     Mask = convertFixedMaskToScalableVector(Mask, DAG);
-    PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT)
-                                   : DAG.getConstant(0, DL, ContainerVT);
+    PassThru = PassThru->isUndefOrPoison()
+                   ? DAG.getUNDEF(ContainerVT)
+                   : DAG.getConstant(0, DL, ContainerVT);
 
     // Emit equivalent scalable vector gather.
     SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
@@ -6833,7 +6834,7 @@ SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
   SDValue PassThru = LoadNode->getPassThru();
   SDValue Mask = LoadNode->getMask();
 
-  if (PassThru->isUndef() || isZerosVector(PassThru.getNode()))
+  if (PassThru->isUndefOrPoison() || isZerosVector(PassThru.getNode()))
     return Op;
 
   SDValue Load = DAG.getMaskedLoad(
@@ -7058,7 +7059,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
   EVT MaskVT = Mask.getValueType();
   EVT ElmtVT = VecVT.getVectorElementType();
   const bool IsFixedLength = VecVT.isFixedLengthVector();
-  const bool HasPassthru = !Passthru.isUndef();
+  const bool HasPassthru = !Passthru.isUndefOrPoison();
   unsigned MinElmts = VecVT.getVectorElementCount().getKnownMinValue();
   EVT FixedVecVT = MVT::getVectorVT(ElmtVT.getSimpleVT(), MinElmts);
 
@@ -12817,7 +12818,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
   SmallVector<ShuffleSourceInfo, 2> Sources;
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue V = Op.getOperand(i);
-    if (V.isUndef())
+    if (V.isUndefOrPoison())
       continue;
     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
              !isa<ConstantSDNode>(V.getOperand(1)) ||
@@ -12851,7 +12852,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
     unsigned OutputFactor = VT.getScalarSizeInBits() / 8;
     for (unsigned I = 0; I < NumElts; ++I) {
       SDValue V = Op.getOperand(I);
-      if (V.isUndef()) {
+      if (V.isUndefOrPoison()) {
         for (unsigned OF = 0; OF < OutputFactor; OF++)
           Mask.push_back(-1);
         continue;
@@ -13029,7 +13030,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
     SDValue Entry = Op.getOperand(i);
-    if (Entry.isUndef())
+    if (Entry.isUndefOrPoison())
       continue;
 
     auto Src = find(Sources, Entry.getOperand(0));
@@ -13594,7 +13595,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
   unsigned BytesPerElt = EltVT.getSizeInBits() / 8;
 
   bool Swap = false;
-  if (V1.isUndef() || isZerosVector(V1.getNode())) {
+  if (V1.isUndefOrPoison() || isZerosVector(V1.getNode())) {
     std::swap(V1, V2);
     Swap = true;
   }
@@ -13602,7 +13603,7 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
   // If the V2 source is undef or zero then we can use a tbl1, as tbl1 will fill
   // out of range values with 0s. We do need to make sure that any out-of-range
   // values are really out-of-range for a v16i8 vector.
-  bool IsUndefOrZero = V2.isUndef() || isZerosVector(V2.getNode());
+  bool IsUndefOrZero = V2.isUndefOrPoison() || isZerosVector(V2.getNode());
   MVT IndexVT = MVT::v8i8;
   unsigned IndexLen = 8;
   if (Op.getValueSizeInBits() == 128) {
@@ -13926,7 +13927,8 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
     Imm *= getExtFactor(V1);
     return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
                        DAG.getConstant(Imm, dl, MVT::i32));
-  } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
+  } else if (V2->isUndefOrPoison() &&
+             isSingletonEXTMask(ShuffleMask, VT, Imm)) {
     Imm *= getExtFactor(V1);
     return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
                        DAG.getConstant(Imm, dl, MVT::i32));
@@ -14557,7 +14559,7 @@ static SDValue NormalizeBuildVector(SDValue Op,
       Lane = DAG.getConstant(
           CstLane->getAPIntValue().trunc(EltTy.getSizeInBits()).getZExtValue(),
           dl, MVT::i32);
-    } else if (Lane.getNode()->isUndef()) {
+    } else if (Lane.getNode()->isUndefOrPoison()) {
       Lane = DAG.getUNDEF(MVT::i32);
     } else {
       assert(Lane.getValueType() == MVT::i32 &&
@@ -14674,9 +14676,10 @@ SDValue AArch64TargetLowering::LowerFixedLengthBuildVectorToSVE(
   SDValue ZeroI64 = DAG.getConstant(0, DL, MVT::i64);
   SmallVector<SDValue, 16> Intermediates = map_to_vector<16>(
       Op->op_values(), [&, Undef = DAG.getUNDEF(ContainerVT)](SDValue Op) {
-        return Op.isUndef() ? Undef
-                            : DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
-                                          ContainerVT, Undef, Op, ZeroI64);
+        return Op.isUndefOrPoison()
+                   ? Undef
+                   : DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT, Undef,
+                                 Op, ZeroI64);
       });
 
   ElementCount ZipEC = ContainerVT.getVectorElementCount();
@@ -14687,8 +14690,9 @@ SDValue AArch64TargetLowering::LowerFixedLengthBuildVectorToSVE(
       SDValue Op0 = DAG.getBitcast(ZipVT, Intermediates[I + 0]);
       SDValue Op1 = DAG.getBitcast(ZipVT, Intermediates[I + 1]);
       Intermediates[I / 2] =
-          Op1.isUndef() ? Op0
-                        : DAG.getNode(AArch64ISD::ZIP1, DL, ZipVT, Op0, Op1);
+          Op1.isUndefOrPoison()
+              ? Op0
+              : DAG.getNode(AArch64ISD::ZIP1, DL, ZipVT, Op0, Op1);
     }
 
     Intermediates.resize(Intermediates.size() / 2);
@@ -14768,7 +14772,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
     SDValue V = Op.getOperand(i);
     if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
       AllLanesExtractElt = false;
-    if (V.isUndef()) {
+    if (V.isUndefOrPoison()) {
       ++NumUndefLanes;
       continue;
     }
@@ -15113,7 +15117,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
     // vector element types. After type-legalization the inserted value is
     // extended (i32) and it is safe to cast them to the vector type by ignoring
     // the upper bits of the lowest lane (e.g. v8i8, v4i16).
-    if (!Op0.isUndef()) {
+    if (!Op0.isUndefOrPoison()) {
       LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n");
       Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
       ++i;
@@ -15124,7 +15128,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
     });
     for (; i < NumElts; ++i) {
       SDValue V = Op.getOperand(i);
-      if (V.isUndef())
+      if (V.isUndefOrPoison())
         continue;
       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
@@ -15353,7 +15357,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
     }
 
     // We can select these directly.
-    if (isTypeLegal(InVT) && Vec0.isUndef())
+    if (isTypeLegal(InVT) && Vec0.isUndefOrPoison())
       return Op;
 
     // Ensure the subvector is half the size of the main vector.
@@ -15397,7 +15401,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
 
   if (Idx == 0 && isPackedVectorType(VT, DAG)) {
     // This will be matched by custom code during ISelDAGToDAG.
-    if (Vec0.isUndef())
+    if (Vec0.isUndefOrPoison())
       return Op;
 
     std::optional<unsigned> PredPattern =
@@ -18736,7 +18740,7 @@ static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
   // Make sure all other operands are equally extended.
   bool SeenZExtOrSExt = !IsAnyExt;
   for (SDValue Op : drop_begin(BV->ops())) {
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
 
     if (calculatePreExtendType(Op) != PreExtendType)
@@ -18764,14 +18768,15 @@ static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
         PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType;
     SmallVector<SDValue, 8> NewOps;
     for (SDValue Op : BV->ops())
-      NewOps.push_back(Op.isUndef() ? DAG.getUNDEF(PreExtendLegalType)
-                                    : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL,
-                                                           PreExtendLegalType));
+      NewOps.push_back(
+          Op.isUndefOrPoison()
+              ? DAG.getUNDEF(PreExtendLegalType)
+              : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, PreExtendLegalType));
     NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps);
   } else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE
     EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType());
     NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0),
-                               BV.getOperand(1).isUndef()
+                               BV.getOperand(1).isUndefOrPoison()
                                    ? DAG.getUNDEF(PreExtendVT)
                                    : BV.getOperand(1).getOperand(0),
                                cast<ShuffleVectorSDNode>(BV)->getMask());
@@ -20084,7 +20089,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
         all_of(N->op_values(), [SrcVT](SDValue V) {
           if (V.getValueType() != SrcVT)
             return false;
-          if (V.isUndef())
+          if (V.isUndefOrPoison())
             return true;
           LoadSDNode *LD = dyn_cast<LoadSDNode>(V);
           return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() &&
@@ -20096,7 +20101,7 @@ static SDValue performConcatVectorsCombine(SDNode *N,
 
       for (unsigned i = 0; i < N->getNumOperands(); i++) {
         SDValue V = N->getOperand(i);
-        if (V.isUndef())
+        if (V.isUndefOrPoison())
           Ops.push_back(DAG.getUNDEF(FVT));
         else {
           LoadSDNode *LD = cast<LoadSDNode>(V);
@@ -20157,7 +20162,8 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     SDValue N10 = N1->getOperand(0);
     SDValue N11 = N1->getOperand(1);
 
-    if (!N00.isUndef() && !N01.isUndef() && !N10.isUndef() && !N11.isUndef()) {
+    if (!N00.isUndefOrPoison() && !N01.isUndefOrPoison() &&
+        !N10.isUndefOrPoison() && !N11.isUndefOrPoison()) {
       SDValue Concat0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N00, N10);
       SDValue Concat1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N01, N11);
       return DAG.getNode(N0Opc, dl, VT, Concat0, Concat1);
@@ -20194,10 +20200,10 @@ static SDValue performConcatVectorsCombine(SDNode *N,
   if (N->getNumOperands() == 2 && IsRSHRN(N0) &&
       ((IsRSHRN(N1) &&
         N0.getConstantOperandVal(1) == N1.getConstantOperandVal(1)) ||
-       N1.isUndef())) {
+       N1.isUndefOrPoison())) {
     SDValue X = N0.getOperand(0).getOperand(0);
-    SDValue Y = N1.isUndef() ? DAG.getUNDEF(X.getValueType())
-                             : N1.getOperand(0).getOperand(0);
+    SDValue Y = N1.isUndefOrPoison() ? DAG.getUNDEF(X.getValueType())
+                                     : N1.getOperand(0).getOperand(0);
     EVT BVT =
         X.getValueType().getDoubleNumVectorElementsVT(*DCI.DAG.getContext());
     SDValue CC = DAG.getNode(ISD::CONCAT_VECTORS, dl, BVT, X, Y);
@@ -20297,7 +20303,7 @@ performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
     return SDValue();
 
   // Ignore widening patterns.
-  if (IdxVal == 0 && Vec.isUndef())
+  if (IdxVal == 0 && Vec.isUndefOrPoison())
     return SDValue();
 
   // Subvector must be half the width and an "aligned" insertion.
@@ -22528,7 +22534,7 @@ static SDValue performZExtDeinterleaveShuffleCombine(SDNode *N,
   bool IsUndefDeInterleave = false;
   if (!IsDeInterleave)
     IsUndefDeInterleave =
-        Shuffle->getOperand(1).isUndef() &&
+        Shuffle->getOperand(1).isUndefOrPoison() &&
         ShuffleVectorInst::isDeInterleaveMaskOfFactor(
             Shuffle->getMask().slice(ExtOffset + VT.getVectorNumElements() / 2,
                                      VT.getVectorNumElements() / 2),
@@ -23062,7 +23068,7 @@ static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
   assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexepected Opcode!");
 
   // splice(pg, op1, undef) -> op1
-  if (N->getOperand(2).isUndef())
+  if (N->getOperand(2).isUndefOrPoison())
     return N->getOperand(1);
 
   return SDValue();
@@ -23075,7 +23081,7 @@ static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
          "Unexpected Opcode!");
 
   // uunpklo/hi undef -> undef
-  if (N->getOperand(0).isUndef())
+  if (N->getOperand(0).isUndefOrPoison())
     return DAG.getUNDEF(N->getValueType(0));
 
   // If this is a masked load followed by an UUNPKLO, fold this into a masked
@@ -23089,7 +23095,7 @@ static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
 
     if (MLD->isUnindexed() && MLD->getExtensionType() != ISD::SEXTLOAD &&
         SDValue(MLD, 0).hasOneUse() && Mask->getOpcode() == AArch64ISD::PTRUE &&
-        (MLD->getPassThru()->isUndef() ||
+        (MLD->getPassThru()->isUndefOrPoison() ||
          isZerosVector(MLD->getPassThru().getNode()))) {
       unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
       unsigned PgPattern = Mask->getConstantOperandVal(0);
@@ -23684,7 +23690,7 @@ static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {
   SDValue Chain = LD->getChain();
   SDValue BasePtr = LD->getBasePtr();
   MachineMemOperand *MMO = LD->getMemOperand();
-  assert(LD->getOffset().isUndef() && "undef offset expected");
+  assert(LD->getOffset().isUndefOrPoison() && "undef offset expected");
 
   // Load 2 x i8, then 1 x i8.
   SDValue L16 = DAG.getLoad(MVT::i16, DL, Chain, BasePtr, MMO);
@@ -23953,7 +23959,7 @@ static SDValue combineI8TruncStore(StoreSDNode *ST, SelectionDAG &DAG,
       ValueVT != EVT::getVectorVT(*DAG.getContext(), MVT::i8, 3))
     return SDValue();
 
-  assert(ST->getOffset().isUndef() && "undef offset expected");
+  assert(ST->getOffset().isUndefOrPoison() && "undef offset expected");
   SDLoc DL(ST);
   auto WideVT = EVT::getVectorVT(
       *DAG.getContext(),
@@ -26231,7 +26237,7 @@ static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) {
   if (Insert.getOpcode() != ISD::INSERT_SUBVECTOR)
     return SDValue();
 
-  if (!Insert.getOperand(0).isUndef())
+  if (!Insert.getOperand(0).isUndefOrPoison())
     return SDValue();
 
   uint64_t IdxInsert = Insert.getConstantOperandVal(2);
@@ -26923,7 +26929,7 @@ bool AArch64TargetLowering::getIndexedAddressParts(SDNode *N, SDNode *Op,
   }
 
   auto IsUndefOrZero = [](SDValue V) {
-    return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
+    return V.isUndefOrPoison() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
   };
 
   // If the only user of the value is a scalable vector splat, it is
@@ -27009,10 +27015,11 @@ static void replaceBoolVectorBitcast(SDNode *N,
   // Special handling for Clang's __builtin_convertvector. For vectors with <8
   // elements, it adds a vector concatenation with undef(s). If we encounter
   // this here, we can skip the concat.
-  if (Op.getOpcode() == ISD::CONCAT_VECTORS && !Op.getOperand(0).isUndef()) {
+  if (Op.getOpcode() == ISD::CONCAT_VECTORS &&
+      !Op.getOperand(0).isUndefOrPoison()) {
     bool AllUndef = true;
     for (unsigned I = 1; I < Op.getNumOperands(); ++I)
-      AllUndef &= Op.getOperand(I).isUndef();
+      AllUndef &= Op.getOperand(I).isUndefOrPoison();
 
     if (AllUndef)
       Op = Op.getOperand(0);
@@ -27111,7 +27118,7 @@ static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
       return;
   }
 
-  if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef())
+  if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndefOrPoison())
     return;
 
   // Check the mask is 1,0,3,2,5,4,...
@@ -28620,7 +28627,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
   SDValue PassThru;
   bool IsPassThruZeroOrUndef = false;
 
-  if (Load->getPassThru()->isUndef()) {
+  if (Load->getPassThru()->isUndefOrPoison()) {
     PassThru = DAG.getUNDEF(ContainerVT);
     IsPassThruZeroOrUndef = true;
   } else {
@@ -29634,7 +29641,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
   unsigned MaxSVESize = Subtarget->getMaxSVEVectorSizeInBits();
   if (MinSVESize == MaxSVESize && MaxSVESize == VT.getSizeInBits()) {
     if (ShuffleVectorInst::isReverseMask(ShuffleMask, ShuffleMask.size()) &&
-        Op2.isUndef()) {
+        Op2.isUndefOrPoison()) {
       Op = DAG.getNode(ISD::VECTOR_REVERSE, DL, ContainerVT, Op1);
       return convertFromScalableVector(DAG, VT, Op);
     }
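
In the AArch64 lowering the upshot is that a POISON passthru is now as cheap as an UNDEF one. A hedged sketch of the gather-path condition (identifiers as in LowerMGATHER above; the value type is illustrative):

  // A poison passthru no longer forces the explicit select on the load
  // result that a non-undef, non-zero passthru requires.
  SDValue PassThru = DAG.getPoison(MVT::nxv4i32);
  bool NeedsSelect =
      !PassThru->isUndefOrPoison() && !isZerosVector(PassThru.getNode());
  // NeedsSelect == false, so the gather's result is used directly.
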
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 2e517c21fc4a868..c5141075a10b2e9 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -7837,7 +7837,7 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
   SDValue FirstOp = Op.getOperand(0);
   if (!isa<ConstantSDNode>(FirstOp) &&
       llvm::all_of(llvm::drop_begin(Op->ops()), [&FirstOp](const SDUse &U) {
-        return U.get().isUndef() || U.get() == FirstOp;
+        return U.get().isUndefOrPoison() || U.get() == FirstOp;
       })) {
     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp,
                               DAG.getValueType(MVT::i1));
@@ -7848,9 +7848,9 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
   unsigned Bits32 = 0;
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue V = Op.getOperand(i);
-    if (!isa<ConstantSDNode>(V) && !V.isUndef())
+    if (!isa<ConstantSDNode>(V) && !V.isUndefOrPoison())
       continue;
-    bool BitSet = V.isUndef() ? false : V->getAsZExtVal();
+    bool BitSet = V.isUndefOrPoison() ? false : V->getAsZExtVal();
     if (BitSet)
       Bits32 |= BoolMask << (i * BitsPerBool);
   }
@@ -7860,7 +7860,7 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
                              DAG.getConstant(Bits32, dl, MVT::i32));
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue V = Op.getOperand(i);
-    if (isa<ConstantSDNode>(V) || V.isUndef())
+    if (isa<ConstantSDNode>(V) || V.isUndefOrPoison())
       continue;
     Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V,
                        DAG.getConstant(i, dl, MVT::i32));
@@ -8044,7 +8044,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
   SDValue Value;
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue V = Op.getOperand(i);
-    if (V.isUndef())
+    if (V.isUndefOrPoison())
       continue;
     if (i > 0)
       isOnlyLowElement = false;
@@ -8203,7 +8203,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
     SDValue Vec = DAG.getUNDEF(VT);
     for (unsigned i = 0 ; i < NumElts; ++i) {
       SDValue V = Op.getOperand(i);
-      if (V.isUndef())
+      if (V.isUndefOrPoison())
         continue;
       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
@@ -8248,7 +8248,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
   SmallVector<ShuffleSourceInfo, 2> Sources;
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue V = Op.getOperand(i);
-    if (V.isUndef())
+    if (V.isUndefOrPoison())
       continue;
     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
       // A shuffle can only come from building a vector from various
@@ -8377,7 +8377,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
     SDValue Entry = Op.getOperand(i);
-    if (Entry.isUndef())
+    if (Entry.isUndefOrPoison())
       continue;
 
     auto Src = llvm::find(Sources, Entry.getOperand(0));
@@ -8577,7 +8577,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
   for (int I : ShuffleMask)
     VTBLMask.push_back(DAG.getSignedConstant(I, DL, MVT::i32));
 
-  if (V2.getNode()->isUndef())
+  if (V2.getNode()->isUndefOrPoison())
     return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                        DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
 
@@ -8684,8 +8684,9 @@ static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
   // fields in a register into 8 other arbitrary 2-bit fields!
   SDValue PredAsVector1 = PromoteMVEPredVector(dl, V1, VT, DAG);
   EVT NewVT = PredAsVector1.getValueType();
-  SDValue PredAsVector2 = V2.isUndef() ? DAG.getUNDEF(NewVT)
-                                       : PromoteMVEPredVector(dl, V2, VT, DAG);
+  SDValue PredAsVector2 = V2.isUndefOrPoison()
+                              ? DAG.getUNDEF(NewVT)
+                              : PromoteMVEPredVector(dl, V2, VT, DAG);
   assert(PredAsVector2.getValueType() == NewVT &&
          "Expected identical vector type in expanded i1 shuffle!");
 
@@ -8880,7 +8881,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
           !isa<ConstantSDNode>(V1.getOperand(0))) {
         bool IsScalarToVector = true;
         for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
-          if (!V1.getOperand(i).isUndef()) {
+          if (!V1.getOperand(i).isUndefOrPoison()) {
             IsScalarToVector = false;
             break;
           }
@@ -8907,7 +8908,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
     if (isVREVMask(ShuffleMask, VT, 16))
       return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
 
-    if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
+    if (ST->hasNEON() && V2->isUndefOrPoison() &&
+        isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
                          DAG.getConstant(Imm, dl, MVT::i32));
     }
@@ -8955,7 +8957,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
     // ->
     //   concat(VZIP(v1, v2):0, :1)
     //
-    if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
+    if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS &&
+        V2->isUndefOrPoison()) {
       SDValue SubV1 = V1->getOperand(0);
       SDValue SubV2 = V1->getOperand(1);
       EVT SubVT = SubV1.getValueType();
@@ -9269,11 +9272,11 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
   SDValue Val = DAG.getUNDEF(MVT::v2f64);
   SDValue Op0 = Op.getOperand(0);
   SDValue Op1 = Op.getOperand(1);
-  if (!Op0.isUndef())
+  if (!Op0.isUndefOrPoison())
     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
                       DAG.getIntPtrConstant(0, dl));
-  if (!Op1.isUndef())
+  if (!Op1.isUndefOrPoison())
     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
                       DAG.getIntPtrConstant(1, dl));
@@ -10315,7 +10318,7 @@ static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
   bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST ||
                              PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) &&
                             isZeroVector(PassThru->getOperand(0));
-  if (!PassThru.isUndef() && !PassThruIsCastZero)
+  if (!PassThru.isUndefOrPoison() && !PassThruIsCastZero)
     Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
   return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
 }
@@ -13535,7 +13538,7 @@ static SDValue PerformVSetCCToVCTPCombine(SDNode *N,
 
   // Check first operand is BuildVector of 0,1,2,...
   for (unsigned I = 0; I < VT.getVectorNumElements(); I++) {
-    if (!Op0.getOperand(I).isUndef() &&
+    if (!Op0.getOperand(I).isUndefOrPoison() &&
         !(isa<ConstantSDNode>(Op0.getOperand(I)) &&
           Op0.getConstantOperandVal(I) == I))
       return SDValue();
@@ -15399,7 +15402,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
       // Assume only bit cast to i32 will go away.
       if (Elt->getOperand(0).getValueType() == MVT::i32)
         ++NumOfBitCastedElts;
-    } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
+    } else if (Elt.isUndefOrPoison() || isa<ConstantSDNode>(Elt))
       // Constants are statically casted, thus do not count them as
       // relevant operands.
       --NumOfRelevantElts;
@@ -15426,7 +15429,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
   SDLoc dl(N);
   for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
     SDValue V = N->getOperand(Idx);
-    if (V.isUndef())
+    if (V.isUndefOrPoison())
       continue;
     if (V.getOpcode() == ISD::BITCAST &&
         V->getOperand(0).getValueType() == MVT::i32)
@@ -15494,7 +15497,7 @@ static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG,
   if (Op.getValueType() == VT)
     return Op;
   // VECTOR_REG_CAST undef -> undef
-  if (Op.isUndef())
+  if (Op.isUndefOrPoison())
     return DAG.getUNDEF(VT);
 
   // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x)
@@ -15719,7 +15722,7 @@ PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
     return SDValue();
 
   // Ignore widening patterns.
-  if (IdxVal == 0 && Vec.isUndef())
+  if (IdxVal == 0 && Vec.isUndefOrPoison())
     return SDValue();
 
   // Subvector must be half the width and an "aligned" insertion.
@@ -15750,7 +15753,8 @@ static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) {
   SDValue Trunc = N->getOperand(0);
   EVT VT = Trunc.getValueType();
-  if (Trunc.getOpcode() != ARMISD::MVETRUNC || !N->getOperand(1).isUndef())
+  if (Trunc.getOpcode() != ARMISD::MVETRUNC ||
+      !N->getOperand(1).isUndefOrPoison())
     return SDValue();
 
   SDLoc DL(Trunc);
@@ -15794,7 +15798,7 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
     return SDValue();
   SDValue Concat0Op1 = Op0.getOperand(1);
   SDValue Concat1Op1 = Op1.getOperand(1);
-  if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
+  if (!Concat0Op1.isUndefOrPoison() || !Concat1Op1.isUndefOrPoison())
     return SDValue();
   // Skip the transformation if any of the types are illegal.
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -16715,7 +16719,7 @@ static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
   auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) {
     ArrayRef<int> M = SVN->getMask();
     unsigned NumElts = ToVT.getVectorNumElements();
-    if (SVN->getOperand(1).isUndef())
+    if (SVN->getOperand(1).isUndefOrPoison())
       NumElts /= 2;
 
     unsigned Off0 = Rev ? NumElts : 0;
@@ -17404,7 +17408,7 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
 static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) {
   unsigned VecOp = N->getOperand(0).getValueType().isVector() ? 0 : 2;
   auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(VecOp));
-  if (!Shuf || !Shuf->getOperand(1).isUndef())
+  if (!Shuf || !Shuf->getOperand(1).isUndefOrPoison())
     return SDValue();
 
   // Check all elements are used once in the mask.
@@ -17420,7 +17424,8 @@ static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) {
 
   if (N->getNumOperands() != VecOp + 1) {
     auto *Shuf2 = dyn_cast<ShuffleVectorSDNode>(N->getOperand(VecOp + 1));
-    if (!Shuf2 || !Shuf2->getOperand(1).isUndef() || Shuf2->getMask() != Mask)
+    if (!Shuf2 || !Shuf2->getOperand(1).isUndefOrPoison() ||
+        Shuf2->getMask() != Mask)
       return SDValue();
   }
 
@@ -17443,9 +17448,9 @@ static SDValue PerformVMOVNCombine(SDNode *N,
   // VMOVNT a undef -> a
   // VMOVNB a undef -> a
   // VMOVNB undef a -> a
-  if (Op1->isUndef())
+  if (Op1->isUndefOrPoison())
     return Op0;
-  if (Op0->isUndef() && !IsTop)
+  if (Op0->isUndefOrPoison() && !IsTop)
     return Op1;
 
   // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b)
@@ -17500,7 +17505,8 @@ static SDValue PerformVQDMULHCombine(SDNode *N,
   auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(RHS);
   // Turn VQDMULH(shuffle, shuffle) -> shuffle(VQDMULH)
   if (Shuf0 && Shuf1 && Shuf0->getMask().equals(Shuf1->getMask()) &&
-      LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
+      LHS.getOperand(1).isUndefOrPoison() &&
+      RHS.getOperand(1).isUndefOrPoison() &&
       (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
     SDLoc DL(N);
     SDValue NewBinOp = DCI.DAG.getNode(N->getOpcode(), DL, VT,
@@ -18658,7 +18664,7 @@ SDValue ARMTargetLowering::PerformMVETruncCombine(
   SDLoc DL(N);
 
   // MVETrunc(Undef, Undef) -> Undef
-  if (all_of(N->ops(), [](SDValue Op) { return Op.isUndef(); }))
+  if (all_of(N->ops(), [](SDValue Op) { return Op.isUndefOrPoison(); }))
     return DAG.getUNDEF(VT);
 
   // MVETrunc(MVETrunc a b, MVETrunc c, d) -> MVETrunc
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 691107abf3e890a..ed09d39449d98c2 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2345,7 +2345,7 @@ bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
 
   // If both vector operands for the shuffle are the same vector, the mask will
   // contain only elements from the first one and the second one will be undef.
-  if (N->getOperand(1).isUndef()) {
+  if (N->getOperand(1).isUndefOrPoison()) {
     ShiftElts = 0;
     Swap = true;
     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
@@ -2385,7 +2385,7 @@ bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
 
   // If both vector operands for the shuffle are the same vector, the mask will
   // contain only elements from the first one and the second one will be undef.
-  if (N->getOperand(1).isUndef()) {
+  if (N->getOperand(1).isUndefOrPoison()) {
     assert(M0 < 4 && "Indexing into an undef vector?");
     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
       return false;
@@ -2483,7 +2483,7 @@ bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
 
   // If both vector operands for the shuffle are the same vector, the mask will
   // contain only elements from the first one and the second one will be undef.
-  if (N->getOperand(1).isUndef()) {
+  if (N->getOperand(1).isUndefOrPoison()) {
     if ((M0 | M1) < 2) {
       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
       Swap = false;
@@ -2560,7 +2560,8 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
 
     // See if all of the elements in the buildvector agree across.
     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
-      if (N->getOperand(i).isUndef()) continue;
+      if (N->getOperand(i).isUndefOrPoison())
+        continue;
       // If the element isn't a constant, bail fully out.
       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
 
@@ -2605,7 +2606,8 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
 
   // Check to see if this buildvec has a single non-undef value in its elements.
   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
-    if (N->getOperand(i).isUndef()) continue;
+    if (N->getOperand(i).isUndefOrPoison())
+      continue;
     if (!OpVal.getNode())
       OpVal = N->getOperand(i);
     else if (OpVal != N->getOperand(i))
@@ -8628,7 +8630,7 @@ bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
     return false;
 
   RLI.Ptr = LD->getBasePtr();
-  if (LD->isIndexed() && !LD->getOffset().isUndef()) {
+  if (LD->isIndexed() && !LD->getOffset().isUndefOrPoison()) {
     assert(LD->getAddressingMode() == ISD::PRE_INC &&
            "Non-pre-inc AM on PPC?");
     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
@@ -9472,7 +9474,7 @@ static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
   if (V->isConstant())
     return false;
   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
-    if (V->getOperand(i).isUndef())
+    if (V->getOperand(i).isUndefOrPoison())
       return false;
     // We want to expand nodes that represent load-and-splat even if the
     // loaded value is a floating point truncation or conversion to int.
@@ -9692,7 +9694,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
       // BUILD_VECTOR is a separate use of the value.
       unsigned NumUsesOfInputLD = 128 / ElementSize;
       for (SDValue BVInOp : Op->ops())
-        if (BVInOp.isUndef())
+        if (BVInOp.isUndefOrPoison())
           NumUsesOfInputLD--;
 
       // Exclude some cases where LD_SPLAT is worse than scalar_to_vector:
@@ -10023,7 +10025,7 @@ SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
     unsigned CurrentElement = Mask[i];
     // If 2nd operand is undefined, we should only look for element 7 in the
     // Mask.
-    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
+    if (V2.isUndefOrPoison() && CurrentElement != VINSERTBSrcElem)
       continue;
 
     bool OtherElementsInOrder = true;
@@ -10035,8 +10037,9 @@ SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
       // If CurrentElement is from V1 [0,15], then we expect the rest of the
       // Mask to be from V2 [16,31] and vice versa.  Unless the 2nd operand is
       // undefined, in which case we assume we're always picking from the 1st
       // operand.
-      int MaskOffset =
-          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
+      int MaskOffset = (!V2.isUndefOrPoison() && CurrentElement < BytesInVector)
+                           ? BytesInVector
+                           : 0;
       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
         OtherElementsInOrder = false;
         break;
@@ -10047,7 +10050,7 @@ SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
     // in the vector we should insert into.
     if (OtherElementsInOrder) {
       // If 2nd operand is undefined, we assume no shifts and no swapping.
-      if (V2.isUndef()) {
+      if (V2.isUndefOrPoison()) {
         ShiftElts = 0;
         Swap = false;
       } else {
@@ -10069,7 +10072,7 @@ SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
   // optionally with VECSHL if shift is required.
   if (Swap)
     std::swap(V1, V2);
-  if (V2.isUndef())
+  if (V2.isUndefOrPoison())
     V2 = V1;
   if (ShiftElts) {
     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
@@ -10138,7 +10141,7 @@ SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
     // If both vector operands for the shuffle are the same vector, the mask
     // will contain only elements from the first one and the second one will be
     // undef.
-    if (V2.isUndef()) {
+    if (V2.isUndefOrPoison()) {
       ShiftElts = 0;
       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
       TargetOrder = OriginalOrderLow;
@@ -10175,7 +10178,7 @@ SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
   // optionally with VECSHL if shift is required.
   if (Swap)
     std::swap(V1, V2);
-  if (V2.isUndef())
+  if (V2.isUndefOrPoison())
     V2 = V1;
   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
   if (ShiftElts) {
@@ -10330,7 +10333,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   // combine it because that will just produce multiple loads.
   bool IsPermutedLoad = false;
   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
-  if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
+  if (InputLoad && Subtarget.hasVSX() && V2.isUndefOrPoison() &&
       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
       InputLoad->hasOneUse()) {
     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
@@ -10390,7 +10393,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   if (Subtarget.hasP9Vector() &&
       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
                            isLittleEndian)) {
-    if (V2.isUndef())
+    if (V2.isUndefOrPoison())
       V2 = V1;
     else if (Swap)
       std::swap(V1, V2);
@@ -10428,8 +10431,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
     if (Swap)
       std::swap(V1, V2);
     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
-    SDValue Conv2 =
-        DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
+    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
+                                V2.isUndefOrPoison() ? V1 : V2);
 
     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
                               DAG.getConstant(ShiftElts, dl, MVT::i32));
@@ -10441,8 +10444,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
     if (Swap)
       std::swap(V1, V2);
     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
-    SDValue Conv2 =
-        DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
+    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
+                                V2.isUndefOrPoison() ? V1 : V2);
 
     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                               DAG.getConstant(ShiftElts, dl, MVT::i32));
@@ -10470,7 +10473,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   }
 
   if (Subtarget.hasVSX()) {
-    if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
+    if (V2.isUndefOrPoison() && PPC::isSplatShuffleMask(SVOp, 4)) {
       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
 
       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
@@ -10480,7 +10483,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
     }
 
     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
-    if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
+    if (V2.isUndefOrPoison() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
@@ -10490,7 +10493,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   // Cases that are handled by instructions that take permute immediates
   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
   // selected by the instruction selector.
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     if (PPC::isSplatShuffleMask(SVOp, 1) ||
         PPC::isSplatShuffleMask(SVOp, 2) ||
         PPC::isSplatShuffleMask(SVOp, 4) ||
@@ -10591,7 +10594,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
 
   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
   // vector that will get spilled to the constant pool.
-  if (V2.isUndef()) V2 = V1;
+  if (V2.isUndefOrPoison())
+    V2 = V1;
 
   return LowerVPERM(Op, DAG, PermMask, VT, V1, V2);
 }
@@ -15028,7 +15032,7 @@ combineElementTruncationToVectorTruncation(SDNode *N,
       if (Is32Bit) {
         // For 32-bit values, we need to add an FP_ROUND node (if we made it
         // here, we know that all inputs are extending loads so this is safe).
-        if (In.isUndef())
+        if (In.isUndefOrPoison())
           Ops.push_back(DAG.getUNDEF(SrcVT));
         else {
           SDValue Trunc =
@@ -15037,7 +15041,8 @@ combineElementTruncationToVectorTruncation(SDNode *N,
           Ops.push_back(Trunc);
         }
       } else
-        Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
+        Ops.push_back(In.isUndefOrPoison() ? DAG.getUNDEF(SrcVT)
+                                           : In.getOperand(0));
     }
 
     unsigned Opcode;
@@ -15730,13 +15735,13 @@ static bool isSplatBV(SDValue Op) {
   // Find first non-undef input.
   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
     FirstOp = Op.getOperand(i);
-    if (!FirstOp.isUndef())
+    if (!FirstOp.isUndefOrPoison())
       break;
   }
 
   // All inputs are undef or the same as the first non-undef input.
   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
-    if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
+    if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndefOrPoison())
       return false;
   return true;
 }
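
The hunks above mechanically swap isUndef() for isUndefOrPoison() so that the
new ISD::POISON opcode is handled the same way as ISD::UNDEF throughout PPC
lowering. As a rough sketch, such a combined predicate plausibly reduces to
the following; the exact SDValue member may be spelled differently:

  // Sketch only; assumes ISD::POISON sits alongside ISD::UNDEF as in this
  // patch. The real member function on SDValue may differ in detail.
  static bool isUndefOrPoison(SDValue V) {
    unsigned Opc = V.getOpcode();
    return Opc == ISD::UNDEF || Opc == ISD::POISON;
  }
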
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 95f1deed8b6c029..97ff64fa2ea7a08 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3335,7 +3335,7 @@ getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
               const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op,
               SDValue Offset, SDValue Mask, SDValue VL,
               unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
-  if (Passthru.isUndef())
+  if (Passthru.isUndefOrPoison())
     Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
   SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
   SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
@@ -3347,7 +3347,7 @@ getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL,
             EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask,
             SDValue VL,
             unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
-  if (Passthru.isUndef())
+  if (Passthru.isUndefOrPoison())
     Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
   SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
   SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
@@ -3419,7 +3419,7 @@ static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op,
   SmallVector<std::optional<APInt>> Elts(Op.getNumOperands());
   const unsigned OpSize = Op.getScalarValueSizeInBits();
   for (auto [Idx, Elt] : enumerate(Op->op_values())) {
-    if (Elt.isUndef()) {
+    if (Elt.isUndefOrPoison()) {
       Elts[Idx] = std::nullopt;
       continue;
     }
@@ -3573,8 +3573,8 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
   SDValue DominantValue;
   unsigned MostCommonCount = 0;
   DenseMap<SDValue, unsigned> ValueCounts;
-  unsigned NumUndefElts =
-      count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
+  unsigned NumUndefElts = count_if(
+      Op->op_values(), [](const SDValue &V) { return V.isUndefOrPoison(); });
 
   // Track the number of scalar loads we know we'd be inserting, estimated as
   // any non-zero floating-point constant. Other kinds of element are either
@@ -3584,7 +3584,7 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
   unsigned NumScalarLoads = 0;
 
   for (SDValue V : Op->op_values()) {
-    if (V.isUndef())
+    if (V.isUndefOrPoison())
       continue;
 
     unsigned &Count = ValueCounts[V];
@@ -3622,7 +3622,7 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
     // is also better than using vmerge.vx as it avoids the need to
     // materialize the mask in a vector register.
     if (SDValue LastOp = Op->getOperand(Op->getNumOperands() - 1);
-        !LastOp.isUndef() && ValueCounts[LastOp] == 1 &&
+        !LastOp.isUndefOrPoison() && ValueCounts[LastOp] == 1 &&
         LastOp != DominantValue) {
       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
       auto OpCode =
@@ -3638,7 +3638,7 @@ static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
     for (const auto &OpIdx : enumerate(Op->ops())) {
       const SDValue &V = OpIdx.value();
-      if (V.isUndef() || !Processed.insert(V).second)
+      if (V.isUndefOrPoison() || !Processed.insert(V).second)
         continue;
       if (ValueCounts[V] == 1) {
         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
@@ -3715,7 +3715,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
 
     for (unsigned I = 0; I < NumElts;) {
       SDValue V = Op.getOperand(I);
-      bool BitValue = !V.isUndef() && V->getAsZExtVal();
+      bool BitValue = !V.isUndefOrPoison() && V->getAsZExtVal();
       Bits |= ((uint64_t)BitValue << BitPos);
       ++BitPos;
       ++I;
@@ -3845,7 +3845,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
     // Construct the amalgamated value at this larger vector type.
     for (const auto &OpIdx : enumerate(Op->op_values())) {
       const auto &SeqV = OpIdx.value();
-      if (!SeqV.isUndef())
+      if (!SeqV.isUndefOrPoison())
         SplatValue |=
             ((SeqV->getAsZExtVal() & EltMask) << (OpIdx.index() * EltBitSize));
     }
@@ -3902,7 +3902,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
     // Construct the amalgamated value which can be splatted as this larger
     // vector type.
     for (const auto &SeqV : Sequence) {
-      if (!SeqV.isUndef())
+      if (!SeqV.isUndefOrPoison())
         SplatValue |=
             ((SeqV->getAsZExtVal() & EltMask) << (EltIdx * EltBitSize));
       EltIdx++;
@@ -4174,8 +4174,8 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
   // vslide1down path, we should be able to fold the vselect into the final
   // vslidedown (for the undef tail) for the first half w/ masking.
   unsigned NumElts = VT.getVectorNumElements();
-  unsigned NumUndefElts =
-      count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
+  unsigned NumUndefElts = count_if(
+      Op->op_values(), [](const SDValue &V) { return V.isUndefOrPoison(); });
   unsigned NumDefElts = NumElts - NumUndefElts;
   if (NumDefElts >= 8 && NumDefElts > NumElts / 2 &&
       ContainerVT.bitsLE(getLMUL1VT(ContainerVT))) {
@@ -4234,7 +4234,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
   // we use for integer constants here?
   unsigned UndefCount = 0;
   for (const SDValue &V : Op->ops()) {
-    if (V.isUndef()) {
+    if (V.isUndefOrPoison()) {
       UndefCount++;
       continue;
     }
@@ -4260,7 +4260,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
   SDValue Vec;
   UndefCount = 0;
   for (SDValue V : Op->ops()) {
-    if (V.isUndef()) {
+    if (V.isUndefOrPoison()) {
       UndefCount++;
       continue;
     }
@@ -4341,7 +4341,7 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
 
   // If the hi bits of the splat are undefined, then it's fine to just splat Lo
   // even if it might be sign extended.
-  if (Hi.isUndef())
+  if (Hi.isUndefOrPoison())
     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
 
   // Fall back to a stack store and stride x0 vector load.
@@ -4367,7 +4367,7 @@ static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
                                 MVT VT, const SDLoc &DL, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
-  bool HasPassthru = Passthru && !Passthru.isUndef();
+  bool HasPassthru = Passthru && !Passthru.isUndefOrPoison();
   if (!HasPassthru && !Passthru)
     Passthru = DAG.getUNDEF(VT);
 
@@ -4475,7 +4475,7 @@ static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL, MVT VT,
 static SDValue getSingleShuffleSrc(MVT VT, MVT ContainerVT, SDValue V1,
                                    SDValue V2) {
 
-  if (V2.isUndef() &&
+  if (V2.isUndefOrPoison() &&
       RISCVTargetLowering::getLMUL(ContainerVT) != RISCVII::VLMUL::LMUL_8)
     return V1;
 
@@ -4906,9 +4906,9 @@ static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV,
 
   // FIXME: Not only does this optimize the code, it fixes some correctness
   // issues because MIR does not have freeze.
-  if (EvenV.isUndef())
+  if (EvenV.isUndefOrPoison())
     return getWideningSpread(OddV, 2, 1, DL, DAG);
-  if (OddV.isUndef())
+  if (OddV.isUndefOrPoison())
     return getWideningSpread(EvenV, 2, 0, DL, DAG);
 
   MVT VecVT = EvenV.getSimpleValueType();
@@ -5005,7 +5005,7 @@ static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN,
 
   if (!ShuffleVectorInst::isReverseMask(SVN->getMask(),
                                         SVN->getMask().size()) ||
-      !SVN->getOperand(1).isUndef())
+      !SVN->getOperand(1).isUndefOrPoison())
     return SDValue();
 
   unsigned ViaEltSize = std::max((uint64_t)8, PowerOf2Ceil(NumElts));
@@ -5315,8 +5315,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
     // Promote i1 shuffle to i8 shuffle.
     MVT WidenVT = MVT::getVectorVT(MVT::i8, VT.getVectorElementCount());
     V1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V1);
-    V2 = V2.isUndef() ? DAG.getUNDEF(WidenVT)
-                      : DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V2);
+    V2 = V2.isUndefOrPoison() ? DAG.getUNDEF(WidenVT)
+                              : DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V2);
     SDValue Shuffled = DAG.getVectorShuffle(WidenVT, DL, V1, V2, SVN->getMask());
     return DAG.getSetCC(DL, VT, Shuffled, DAG.getConstant(0, DL, WidenVT),
                         ISD::SETNE);
@@ -5466,7 +5466,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
     return convertFromScalableVector(VT, Res, DAG, Subtarget);
   }
 
-  if (ShuffleVectorInst::isReverseMask(Mask, NumElts) && V2.isUndef())
+  if (ShuffleVectorInst::isReverseMask(Mask, NumElts) && V2.isUndefOrPoison())
     return DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V1);
 
   // If this is a deinterleave(2,4,8) and we can widen the vector, then we can
@@ -5531,8 +5531,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
 
 
   // Handle any remaining single source shuffles
-  assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
-  if (V2.isUndef()) {
+  assert(!V1.isUndefOrPoison() && "Unexpected shuffle canonicalization");
+  if (V2.isUndefOrPoison()) {
     // We might be able to express the shuffle as a bitrotate. But even if we
     // don't have Zvkb and have to expand, the expanded sequence of approx. 2
     // shifts and a vor will have a higher throughput than a vrgather.
@@ -7252,7 +7252,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     for (const auto &OpIdx : enumerate(Op->ops())) {
       SDValue SubVec = OpIdx.value();
       // Don't insert undef subvectors.
-      if (SubVec.isUndef())
+      if (SubVec.isUndefOrPoison())
         continue;
       Vec =
           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
@@ -9148,7 +9148,7 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                              Vec, Vec, ValLo, I32Mask, InsertI64VL);
        // If the source vector is undef, don't pass along the tail elements from
       // the previous slide1down.
-      SDValue Tail = Vec.isUndef() ? Vec : ValInVec;
+      SDValue Tail = Vec.isUndefOrPoison() ? Vec : ValInVec;
       ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
                              Tail, ValInVec, ValHi, I32Mask, InsertI64VL);
       // Bitcast back to the right container type.
@@ -9512,7 +9512,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
     // Assume Policy operand is the last operand.
     uint64_t Policy = Operands[NumOps - 1]->getAsZExtVal();
     // We don't need to select maskedoff if it's undef.
-    if (MaskedOff.isUndef())
+    if (MaskedOff.isUndefOrPoison())
       return Vec;
     // TAMU
     if (Policy == RISCVII::TAIL_AGNOSTIC)
@@ -9796,7 +9796,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     SDValue VL = getVLOperand(Op);
 
     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
-    if (Op.getOperand(1).isUndef())
+    if (Op.getOperand(1).isUndefOrPoison())
       return SplattedVal;
     SDValue SplattedIdx =
         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
@@ -10467,7 +10467,7 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
   unsigned OrigIdx = Op.getConstantOperandVal(2);
   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
 
-  if (OrigIdx == 0 && Vec.isUndef())
+  if (OrigIdx == 0 && Vec.isUndefOrPoison())
     return Op;
 
   // We don't have the ability to slide mask vectors up indexed by their i1
@@ -10605,7 +10605,7 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
   // subregister operation). See below for how our VSLIDEUP works. We go via a
   // LMUL=1 type to avoid allocating a large register group to hold our
   // subvector.
-  if (RemIdx.isZero() && (ExactlyVecRegSized || Vec.isUndef())) {
+  if (RemIdx.isZero() && (ExactlyVecRegSized || Vec.isUndefOrPoison())) {
     if (SubVecVT.isFixedLengthVector()) {
       // We may get NoSubRegister if inserting at index 0 and the subvec
       // container is the same as the vector, e.g. vec=v4i32,subvec=v4i32,idx=0
@@ -13722,7 +13722,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
   SDValue ScalarV = Reduce.getOperand(2);
   EVT ScalarVT = ScalarV.getValueType();
   if (ScalarV.getOpcode() == ISD::INSERT_SUBVECTOR &&
-      ScalarV.getOperand(0)->isUndef() &&
+      ScalarV.getOperand(0)->isUndefOrPoison() &&
       isNullConstant(ScalarV.getOperand(2)))
     ScalarV = ScalarV.getOperand(1);
 
@@ -15181,7 +15181,8 @@ struct NodeExtensionHelper {
            "Unexpected Opcode");
 
     // The passthru must be undef for tail agnostic.
-    if (Opc == RISCVISD::VMV_V_X_VL && !OrigOperand.getOperand(0).isUndef())
+    if (Opc == RISCVISD::VMV_V_X_VL &&
+        !OrigOperand.getOperand(0).isUndefOrPoison())
       return;
 
     // Get the scalar value.
@@ -15287,7 +15288,7 @@ struct NodeExtensionHelper {
     case RISCVISD::VFMV_V_F_VL: {
       MVT VT = OrigOperand.getSimpleValueType();
 
-      if (!OrigOperand.getOperand(0).isUndef())
+      if (!OrigOperand.getOperand(0).isUndefOrPoison())
         break;
 
       SDValue Op = OrigOperand.getOperand(1);
@@ -15825,7 +15826,7 @@ static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {
 
   // Passthru should be undef
   SDValue Passthru = N->getOperand(2);
-  if (!Passthru.isUndef())
+  if (!Passthru.isUndefOrPoison())
     return SDValue();
 
   // Mask should be all ones
@@ -15837,7 +15838,7 @@ static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {
   SDValue Z = MergeOp->getOperand(2);
 
   if (Z.getOpcode() == ISD::INSERT_SUBVECTOR &&
-      (isNullOrNullSplat(Z.getOperand(0)) || Z.getOperand(0).isUndef()))
+      (isNullOrNullSplat(Z.getOperand(0)) || Z.getOperand(0).isUndefOrPoison()))
     Z = Z.getOperand(1);
 
   if (!ISD::isConstantSplatVectorAllZeros(Z.getNode()))
@@ -16739,7 +16740,7 @@ static SDValue performBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
   SmallVector<SDValue> LHSOps;
   SmallVector<SDValue> RHSOps;
   for (SDValue Op : N->ops()) {
-    if (Op.isUndef()) {
+    if (Op.isUndefOrPoison()) {
       // We can't form a divide or remainder from undef.
       if (!DAG.isSafeToSpeculativelyExecute(Opcode))
         return SDValue();
@@ -16982,7 +16983,7 @@ static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
 
   if (N->getOpcode() == RISCVISD::ADD_VL) {
     SDValue AddPassthruOp = N->getOperand(2);
-    if (!AddPassthruOp.isUndef())
+    if (!AddPassthruOp.isUndefOrPoison())
       return SDValue();
   }
 
@@ -17005,7 +17006,7 @@ static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
 
   SDValue MulPassthruOp = MulOp.getOperand(2);
 
-  if (!MulPassthruOp.isUndef())
+  if (!MulPassthruOp.isUndefOrPoison())
     return SDValue();
 
   auto [AddMask, AddVL] = [](SDNode *N, SelectionDAG &DAG,
@@ -17090,7 +17091,7 @@ static bool matchIndexAsShuffle(EVT VT, SDValue Index, SDValue Mask,
   for (unsigned i = 0; i < Index->getNumOperands(); i++) {
     // TODO: We've found an active bit of UB, and could be
     // more aggressive here if desired.
-    if (Index->getOperand(i)->isUndef())
+    if (Index->getOperand(i)->isUndefOrPoison())
       return false;
     uint64_t C = Index->getConstantOperandVal(i);
     if (C % ElementSize != 0)
@@ -17133,7 +17134,7 @@ static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
   for (unsigned i = 0; i < Index->getNumOperands(); i++) {
     // TODO: We've found an active bit of UB, and could be
     // more aggressive here if desired.
-    if (Index->getOperand(i)->isUndef())
+    if (Index->getOperand(i)->isUndefOrPoison())
       return false;
     // TODO: This offset check is too strict if we support fully
     // misaligned memory operations.
@@ -17222,14 +17223,15 @@ static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
   auto MatchMinMax = [&VL, &Mask](SDValue V, unsigned Opc, unsigned OpcVL,
                                   APInt &SplatVal) {
     if (V.getOpcode() != Opc &&
-        !(V.getOpcode() == OpcVL && V.getOperand(2).isUndef() &&
+        !(V.getOpcode() == OpcVL && V.getOperand(2).isUndefOrPoison() &&
           V.getOperand(3) == Mask && V.getOperand(4) == VL))
       return SDValue();
 
     SDValue Op = V.getOperand(1);
 
     // Peek through conversion between fixed and scalable vectors.
-    if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
+    if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
+        Op.getOperand(0).isUndefOrPoison() &&
         isNullConstant(Op.getOperand(2)) &&
         Op.getOperand(1).getValueType().isFixedLengthVector() &&
         Op.getOperand(1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
@@ -17240,8 +17242,8 @@ static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
     if (ISD::isConstantSplatVector(Op.getNode(), SplatVal))
       return V.getOperand(0);
 
-    if (Op.getOpcode() == RISCVISD::VMV_V_X_VL && Op.getOperand(0).isUndef() &&
-        Op.getOperand(2) == VL) {
+    if (Op.getOpcode() == RISCVISD::VMV_V_X_VL &&
+        Op.getOperand(0).isUndefOrPoison() && Op.getOperand(2) == VL) {
       if (auto *Op1 = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
         SplatVal =
             Op1->getAPIntValue().sextOrTrunc(Op.getScalarValueSizeInBits());
@@ -17411,7 +17413,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
 
-    if (Op0->isUndef()) {
+    if (Op0->isUndefOrPoison()) {
       SDValue Lo = DAG.getUNDEF(MVT::i32);
       SDValue Hi = DAG.getUNDEF(MVT::i32);
       return DCI.CombineTo(N, Lo, Hi);
@@ -18114,7 +18116,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       APInt NewC(Val.getValueSizeInBits(), 0);
       uint64_t EltSize = Val.getScalarValueSizeInBits();
       for (unsigned i = 0; i < Val.getNumOperands(); i++) {
-        if (Val.getOperand(i).isUndef())
+        if (Val.getOperand(i).isUndefOrPoison())
           continue;
         NewC.insertBits(Val.getConstantOperandAPInt(i).trunc(EltSize),
                         i * EltSize);
@@ -18223,7 +18225,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     // scalar input.
     unsigned ScalarSize = Scalar.getValueSizeInBits();
     unsigned EltWidth = VT.getScalarSizeInBits();
-    if (ScalarSize > EltWidth && Passthru.isUndef())
+    if (ScalarSize > EltWidth && Passthru.isUndefOrPoison())
       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
         return SDValue(N, 0);
 
@@ -18242,7 +18244,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     // Try to remove vector->scalar->vector if the scalar->vector is inserting
     // into an undef vector.
     // TODO: Could use a vslide or vmv.v.v for non-undef.
-    if (N->getOperand(0).isUndef() &&
+    if (N->getOperand(0).isUndefOrPoison() &&
         Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
         isNullConstant(Src.getOperand(1)) &&
         Src.getOperand(0).getValueType().isScalableVector()) {
@@ -18262,7 +18264,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     SDValue Scalar = N->getOperand(1);
     SDValue VL = N->getOperand(2);
 
-    if (Scalar.getOpcode() == RISCVISD::VMV_X_S && Passthru.isUndef() &&
+    if (Scalar.getOpcode() == RISCVISD::VMV_X_S && Passthru.isUndefOrPoison() &&
         Scalar.getOperand(0).getValueType() == N->getValueType(0))
       return Scalar.getOperand(0);
 
@@ -18284,7 +18286,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     // no purpose.
     if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
         Const && !Const->isZero() && isInt<5>(Const->getSExtValue()) &&
-        VT.bitsLE(getLMUL1VT(VT)) && Passthru.isUndef())
+        VT.bitsLE(getLMUL1VT(VT)) && Passthru.isUndefOrPoison())
       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
 
     break;
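
Several of the RISC-V hunks use the predicate to relax vector tail/mask
policies: when the passthru operand is undef or poison, no lanes need to be
preserved, so the fully agnostic policy is always legal and usually cheaper.
A condensed sketch of the pattern inlined in getVSlidedown and getVSlideup
above, with pickPolicy as a hypothetical helper name:

  // Hypothetical condensation of the policy choice in getVSlidedown /
  // getVSlideup; upstream inlines this logic rather than calling a helper.
  static unsigned pickPolicy(SDValue Passthru, unsigned DefaultPolicy) {
    // An undef or poison passthru carries no lanes that must survive, so
    // tail/mask agnostic is a sound (and typically cheaper) choice.
    if (Passthru.isUndefOrPoison())
      return RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
    return DefaultPolicy;
  }
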
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index add51fac4b9e62a..e2c47215cb51c8f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4074,7 +4074,8 @@ static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
                               Vec->ops().slice(IdxVal, ElemsPerChunk));
 
   // Check if we're extracting the upper undef of a widening pattern.
-  if (Vec.getOpcode() == ISD::INSERT_SUBVECTOR && Vec.getOperand(0).isUndef() &&
+  if (Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      Vec.getOperand(0).isUndefOrPoison() &&
       Vec.getOperand(1).getValueType().getVectorNumElements() <= IdxVal &&
       isNullConstant(Vec.getOperand(2)))
     return DAG.getUNDEF(ResultVT);
@@ -4109,7 +4110,7 @@ static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
   assert((vectorWidth == 128 || vectorWidth == 256) &&
          "Unsupported vector width");
   // Inserting an UNDEF subvector leaves Result unchanged.
-  if (Vec.isUndef())
+  if (Vec.isUndefOrPoison())
     return Result;
   EVT VT = Vec.getValueType();
   EVT ElVT = VT.getVectorElementType();
@@ -4154,7 +4155,7 @@ static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
     unsigned NumSrcElts = VecVT.getVectorNumElements();
     ArrayRef<SDUse> Hi = Vec->ops().drop_front(NumSrcElts / 2);
     if (all_of(Hi, [&](SDValue V) {
-          return V.isUndef() || (ZeroNewElements && X86::isZeroNode(V));
+          return V.isUndefOrPoison() || (ZeroNewElements && X86::isZeroNode(V));
         }))
       Vec = extract128BitVector(Vec, 0, DAG, dl);
   }
@@ -4218,7 +4219,7 @@ static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
 
     if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
       // insert_subvector(undef, x, lo)
-      if (Idx == 0 && Src.isUndef()) {
+      if (Idx == 0 && Src.isUndefOrPoison()) {
         Ops.push_back(Sub);
         Ops.push_back(DAG.getUNDEF(SubVT));
         return true;
@@ -4250,7 +4251,7 @@ static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
           return true;
         }
         // insert_subvector(undef, x, hi)
-        if (Src.isUndef()) {
+        if (Src.isUndefOrPoison()) {
           Ops.push_back(DAG.getUNDEF(SubVT));
           Ops.push_back(Sub);
           return true;
@@ -4275,7 +4276,7 @@ static SDValue isUpperSubvectorUndef(SDValue V, const SDLoc &DL,
   assert((NumSubOps % 2) == 0 && "Unexpected number of subvectors");
 
   ArrayRef<SDValue> UpperOps(SubOps.begin() + HalfNumSubOps, SubOps.end());
-  if (any_of(UpperOps, [](SDValue Op) { return !Op.isUndef(); }))
+  if (any_of(UpperOps, [](SDValue Op) { return !Op.isUndefOrPoison(); }))
     return SDValue();
 
   EVT HalfVT = V.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
@@ -4484,10 +4485,10 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
   unsigned IdxVal = Op.getConstantOperandVal(2);
 
   // Inserting undef is a nop. We can just return the original vector.
-  if (SubVec.isUndef())
+  if (SubVec.isUndefOrPoison())
     return Vec;
 
-  if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
+  if (IdxVal == 0 && Vec.isUndefOrPoison()) // the operation is legal
     return Op;
 
   MVT OpVT = Op.getSimpleValueType();
@@ -4533,7 +4534,7 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                        Undef, SubVec, ZeroIdx);
 
-  if (Vec.isUndef()) {
+  if (Vec.isUndefOrPoison()) {
     assert(IdxVal != 0 && "Unexpected index");
     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
@@ -4544,7 +4545,7 @@ static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
     assert(IdxVal != 0 && "Unexpected index");
     // If upper elements of Vec are known undef, then just shift into place.
     if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
-                     [](SDValue V) { return V.isUndef(); })) {
+                     [](SDValue V) { return V.isUndefOrPoison(); })) {
       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                            DAG.getTargetConstant(IdxVal, dl, MVT::i8));
     } else {
@@ -4729,15 +4730,17 @@ void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
 // Attempt to constant fold, else just create a VECTOR_SHUFFLE.
 static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
                                 SDValue V1, SDValue V2, ArrayRef<int> Mask) {
-  if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
-      (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
+  if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) ||
+       V1.isUndefOrPoison()) &&
+      (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) ||
+       V2.isUndefOrPoison())) {
     SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
     for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
       int M = Mask[I];
       if (M < 0)
         continue;
       SDValue V = (M < NumElts) ? V1 : V2;
-      if (V.isUndef())
+      if (V.isUndefOrPoison())
         continue;
       Ops[I] = V.getOperand(M % NumElts);
     }
@@ -4990,7 +4993,7 @@ static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
   };
 
   // Handle UNDEFs.
-  if (Op.isUndef()) {
+  if (Op.isUndefOrPoison()) {
     APInt UndefSrcElts = APInt::getAllOnes(NumElts);
     SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
     return CastBitData(UndefSrcElts, SrcEltBits);
@@ -5773,7 +5776,7 @@ static void computeZeroableShuffleElements(ArrayRef<int> Mask,
     if ((Size % V.getNumOperands()) == 0) {
       int Scale = Size / V->getNumOperands();
       SDValue Op = V.getOperand(M / Scale);
-      if (Op.isUndef())
+      if (Op.isUndefOrPoison())
         KnownUndef.setBit(i);
       if (X86::isZeroNode(Op))
         KnownZero.setBit(i);
@@ -5799,7 +5802,7 @@ static void computeZeroableShuffleElements(ArrayRef<int> Mask,
       bool AllZero = true;
       for (int j = 0; j < Scale; ++j) {
         SDValue Op = V.getOperand((M * Scale) + j);
-        AllUndef &= Op.isUndef();
+        AllUndef &= Op.isUndefOrPoison();
         AllZero &= X86::isZeroNode(Op);
       }
       if (AllUndef)
@@ -5868,7 +5871,7 @@ static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
     M %= Size;
 
     // We are referencing an UNDEF input.
-    if (V.isUndef()) {
+    if (V.isUndefOrPoison()) {
       KnownUndef.setBit(i);
       continue;
     }
@@ -5893,7 +5896,7 @@ static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
     if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
       SDValue Vec = V.getOperand(0);
       int NumVecElts = Vec.getValueType().getVectorNumElements();
-      if (Vec.isUndef() && Size == NumVecElts) {
+      if (Vec.isUndefOrPoison() && Size == NumVecElts) {
         int Idx = V.getConstantOperandVal(2);
         int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
         if (M < Idx || (Idx + NumSubElts) <= M)
@@ -6106,7 +6109,7 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
       InsertIdx *= (MaxElts / NumElts);
       ExtractIdx *= (MaxElts / NumSubSrcBCElts);
       NumSubElts *= (MaxElts / NumElts);
-      bool SrcIsUndef = Src.isUndef();
+      bool SrcIsUndef = Src.isUndefOrPoison();
       for (int i = 0; i != (int)MaxElts; ++i)
         Mask.push_back(SrcIsUndef ? SM_SentinelUndef : i);
       for (int i = 0; i != (int)NumSubElts; ++i)
@@ -6122,7 +6125,7 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
     if (Depth > 0 && InsertIdx == NumSubElts && NumElts == (2 * NumSubElts) &&
         NumBitsPerElt == 64 && NumSizeInBits == 512 &&
         Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
-        Src.getOperand(0).isUndef() &&
+        Src.getOperand(0).isUndefOrPoison() &&
         Src.getOperand(1).getValueType() == SubVT &&
         Src.getConstantOperandVal(2) == 0) {
       for (int i = 0; i != (int)NumSubElts; ++i)
@@ -6282,9 +6285,9 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
     // lanes), we can treat this as a truncation shuffle.
     bool Offset0 = false, Offset1 = false;
     if (Opcode == X86ISD::PACKSS) {
-      if ((!(N0.isUndef() || EltsLHS.isZero()) &&
+      if ((!(N0.isUndefOrPoison() || EltsLHS.isZero()) &&
            DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
-          (!(N1.isUndef() || EltsRHS.isZero()) &&
+          (!(N1.isUndefOrPoison() || EltsRHS.isZero()) &&
            DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
         return false;
       // We can't easily fold ASHR into a shuffle, but if it was feeding a
@@ -6302,9 +6305,9 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
       }
     } else {
       APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
-      if ((!(N0.isUndef() || EltsLHS.isZero()) &&
+      if ((!(N0.isUndefOrPoison() || EltsLHS.isZero()) &&
            !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
-          (!(N1.isUndef() || EltsRHS.isZero()) &&
+          (!(N1.isUndefOrPoison() || EltsRHS.isZero()) &&
            !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
         return false;
     }
@@ -6494,7 +6497,7 @@ static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
     int hi = lo + MaskWidth;
 
     // Strip UNDEF input usage.
-    if (Inputs[i].isUndef())
+    if (Inputs[i].isUndefOrPoison())
       for (int &M : Mask)
         if ((lo <= M) && (M < hi))
           M = SM_SentinelUndef;
@@ -6870,8 +6873,8 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, const SDLoc &DL,
   std::bitset<4> Zeroable, Undefs;
   for (int i = 0; i < 4; ++i) {
     SDValue Elt = Op.getOperand(i);
-    Undefs[i] = Elt.isUndef();
-    Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
+    Undefs[i] = Elt.isUndefOrPoison();
+    Zeroable[i] = (Elt.isUndefOrPoison() || X86::isZeroNode(Elt));
   }
   assert(Zeroable.size() - Zeroable.count() > 1 &&
          "We expect at least two non-zero elements!");
@@ -7127,7 +7130,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
     SDValue Elt = peekThroughBitcasts(Elts[i]);
     if (!Elt.getNode())
       return SDValue();
-    if (Elt.isUndef()) {
+    if (Elt.isUndefOrPoison()) {
       UndefMask.setBit(i);
       continue;
     }
@@ -7335,15 +7338,15 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
         if (!LoadMask[i])
           continue;
         SDValue Elt = peekThroughBitcasts(Elts[i]);
-        if (RepeatedLoads[i % SubElems].isUndef())
+        if (RepeatedLoads[i % SubElems].isUndefOrPoison())
           RepeatedLoads[i % SubElems] = Elt;
         else
           Match &= (RepeatedLoads[i % SubElems] == Elt);
       }
 
       // We must have loads at both ends of the repetition.
-      Match &= !RepeatedLoads.front().isUndef();
-      Match &= !RepeatedLoads.back().isUndef();
+      Match &= !RepeatedLoads.front().isUndefOrPoison();
+      Match &= !RepeatedLoads.back().isUndefOrPoison();
       if (!Match)
         continue;
 
@@ -7858,7 +7861,7 @@ static SDValue LowerBUILD_VECTORvXi1(SDValue Op, const SDLoc &dl,
   int SplatIdx = -1;
   for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
     SDValue In = Op.getOperand(idx);
-    if (In.isUndef())
+    if (In.isUndefOrPoison())
       continue;
     if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
       Immediate |= (InC->getZExtValue() & 0x1) << idx;
@@ -7986,7 +7989,7 @@ static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
     SDValue Op = N->getOperand(i + BaseIdx);
 
     // Skip UNDEFs.
-    if (Op->isUndef()) {
+    if (Op->isUndefOrPoison()) {
       // Update the expected vector extract index.
       if (i * 2 == NumElts)
         ExpectedVExtractIdx = BaseIdx;
@@ -8016,13 +8019,13 @@ static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
     unsigned I1 = Op1.getConstantOperandVal(1);
 
     if (i * 2 < NumElts) {
-      if (V0.isUndef()) {
+      if (V0.isUndefOrPoison()) {
         V0 = Op0.getOperand(0);
         if (V0.getValueType() != VT)
           return false;
       }
     } else {
-      if (V1.isUndef()) {
+      if (V1.isUndefOrPoison()) {
         V1 = Op0.getOperand(0);
         if (V1.getValueType() != VT)
           return false;
@@ -8098,16 +8101,16 @@ static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
 
   if (Mode) {
     // Don't emit a horizontal binop if the result is expected to be UNDEF.
-    if (!isUndefLO && !V0->isUndef())
+    if (!isUndefLO && !V0->isUndefOrPoison())
       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
-    if (!isUndefHI && !V1->isUndef())
+    if (!isUndefHI && !V1->isUndefOrPoison())
       HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
   } else {
     // Don't emit a horizontal binop if the result is expected to be UNDEF.
-    if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
+    if (!isUndefLO && (!V0_LO->isUndefOrPoison() || !V1_LO->isUndefOrPoison()))
       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
 
-    if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
+    if (!isUndefHI && (!V0_HI->isUndefOrPoison() || !V1_HI->isUndefOrPoison()))
       HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
   }
 
@@ -8175,12 +8178,12 @@ static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
     Opc[i % 2] = Opcode;
 
     // Update InVec0 and InVec1.
-    if (InVec0.isUndef()) {
+    if (InVec0.isUndefOrPoison()) {
       InVec0 = Op0.getOperand(0);
       if (InVec0.getSimpleValueType() != VT)
         return false;
     }
-    if (InVec1.isUndef()) {
+    if (InVec1.isUndefOrPoison()) {
       InVec1 = Op1.getOperand(0);
       if (InVec1.getSimpleValueType() != VT)
         return false;
@@ -8209,8 +8212,8 @@ static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
   // Ensure we have found an opcode for both parities and that they are
   // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
   // inputs are undef.
-  if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
-      InVec0.isUndef() || InVec1.isUndef())
+  if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] || InVec0.isUndefOrPoison() ||
+      InVec1.isUndefOrPoison())
     return false;
 
   IsSubAdd = Opc[0] == ISD::FADD;
@@ -8334,7 +8337,7 @@ static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
     for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
       // Ignore undef elements.
       SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
-      if (Op.isUndef())
+      if (Op.isUndefOrPoison())
         continue;
 
       // If there's an opcode mismatch, we're done.
@@ -8367,10 +8370,10 @@ static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
       // The source vector is chosen based on which 64-bit half of the
       // destination vector is being calculated.
       if (j < NumEltsIn64Bits) {
-        if (V0.isUndef())
+        if (V0.isUndefOrPoison())
           V0 = Op0.getOperand(0);
       } else {
-        if (V1.isUndef())
+        if (V1.isUndefOrPoison())
           V1 = Op0.getOperand(0);
       }
 
@@ -8424,7 +8427,7 @@ static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
   unsigned NumElts = VT.getVectorNumElements();
   APInt DemandedElts = APInt::getAllOnes(NumElts);
   for (unsigned i = 0; i != NumElts; ++i)
-    if (BV->getOperand(i).isUndef())
+    if (BV->getOperand(i).isUndefOrPoison())
       DemandedElts.clearBit(i);
 
   // If we don't need the upper xmm, then perform as a xmm hop.
@@ -8446,7 +8449,7 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
                                    SelectionDAG &DAG) {
   // We need at least 2 non-undef elements to make this worthwhile by default.
   unsigned NumNonUndefs =
-      count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
+      count_if(BV->op_values(), [](SDValue V) { return !V.isUndefOrPoison(); });
   if (NumNonUndefs < 2)
     return SDValue();
 
@@ -8474,11 +8477,11 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
   unsigned NumUndefsLO = 0;
   unsigned NumUndefsHI = 0;
   for (unsigned i = 0, e = Half; i != e; ++i)
-    if (BV->getOperand(i)->isUndef())
+    if (BV->getOperand(i)->isUndefOrPoison())
       NumUndefsLO++;
 
   for (unsigned i = Half, e = NumElts; i != e; ++i)
-    if (BV->getOperand(i)->isUndef())
+    if (BV->getOperand(i)->isUndefOrPoison())
       NumUndefsHI++;
 
   SDValue InVec0, InVec1;
@@ -8490,15 +8493,19 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
     if (isHorizontalBinOpPart(BV, ISD::ADD, DL, DAG, 0, Half, InVec0, InVec1) &&
         isHorizontalBinOpPart(BV, ISD::ADD, DL, DAG, Half, NumElts, InVec2,
                               InVec3) &&
-        ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
-        ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
+        ((InVec0.isUndefOrPoison() || InVec2.isUndefOrPoison()) ||
+         InVec0 == InVec2) &&
+        ((InVec1.isUndefOrPoison() || InVec3.isUndefOrPoison()) ||
+         InVec1 == InVec3))
       X86Opcode = X86ISD::HADD;
     else if (isHorizontalBinOpPart(BV, ISD::SUB, DL, DAG, 0, Half, InVec0,
                                    InVec1) &&
              isHorizontalBinOpPart(BV, ISD::SUB, DL, DAG, Half, NumElts, InVec2,
                                    InVec3) &&
-             ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
-             ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
+             ((InVec0.isUndefOrPoison() || InVec2.isUndefOrPoison()) ||
+              InVec0 == InVec2) &&
+             ((InVec1.isUndefOrPoison() || InVec3.isUndefOrPoison()) ||
+              InVec1 == InVec3))
       X86Opcode = X86ISD::HSUB;
     else
       CanFold = false;
@@ -8512,9 +8519,10 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
       // Convert this build_vector into a pair of horizontal binops followed by
       // a concat vector. We must adjust the outputs from the partial horizontal
       // matching calls above to account for undefined vector halves.
-      SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
-      SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
-      assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
+      SDValue V0 = InVec0.isUndefOrPoison() ? InVec2 : InVec0;
+      SDValue V1 = InVec1.isUndefOrPoison() ? InVec3 : InVec1;
+      assert((!V0.isUndefOrPoison() || !V1.isUndefOrPoison()) &&
+             "Horizontal-op of undefs?");
       bool isUndefLO = NumUndefsLO == Half;
       bool isUndefHI = NumUndefsHI == Half;
       return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
@@ -9011,7 +9019,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
   unsigned NumConstants = NumElems;
   for (unsigned i = 0; i < NumElems; ++i) {
     SDValue Elt = Op.getOperand(i);
-    if (Elt.isUndef()) {
+    if (Elt.isUndefOrPoison()) {
       UndefMask.setBit(i);
       continue;
     }
@@ -9129,7 +9137,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
         ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
       else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
         ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
-      else if (!Elt.isUndef()) {
+      else if (!Elt.isUndefOrPoison()) {
         assert(!VarElt.getNode() && !InsIndex.getNode() &&
                "Expected one variable element in this vector");
         VarElt = Elt;
@@ -9376,13 +9384,14 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
   // For SSE 4.1, use insertps to put the high elements into the low element.
   if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
     SDValue Result;
-    if (!Op.getOperand(0).isUndef())
+    if (!Op.getOperand(0).isUndefOrPoison())
       Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
     else
       Result = DAG.getUNDEF(VT);
 
     for (unsigned i = 1; i < NumElems; ++i) {
-      if (Op.getOperand(i).isUndef()) continue;
+      if (Op.getOperand(i).isUndefOrPoison())
+        continue;
       Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
                            Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
     }
@@ -9394,7 +9403,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
   // bottom slot of the vector (which generates no code for SSE).
   SmallVector<SDValue, 8> Ops(NumElems);
   for (unsigned i = 0; i < NumElems; ++i) {
-    if (!Op.getOperand(i).isUndef())
+    if (!Op.getOperand(i).isUndefOrPoison())
       Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
     else
       Ops[i] = DAG.getUNDEF(VT);
@@ -9437,7 +9446,7 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
   unsigned NonZeros = 0;
   for (unsigned i = 0; i != NumOperands; ++i) {
     SDValue SubVec = Op.getOperand(i);
-    if (SubVec.isUndef())
+    if (SubVec.isUndefOrPoison())
       continue;
     if (ISD::isFreezeUndef(SubVec.getNode())) {
         // If the freeze(undef) has multiple uses then we must fold to zero.
@@ -9503,7 +9512,7 @@ static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
   uint64_t NonZeros = 0;
   for (unsigned i = 0; i != NumOperands; ++i) {
     SDValue SubVec = Op.getOperand(i);
-    if (SubVec.isUndef())
+    if (SubVec.isUndefOrPoison())
       continue;
     assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
     if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
@@ -10626,13 +10635,15 @@ static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
     unsigned NumBits2 = N2.getScalarValueSizeInBits();
     bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
     bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
-    if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
-        (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
+    if ((!N1.isUndefOrPoison() && !IsZero1 && NumBits1 != NumSrcBits) ||
+        (!N2.isUndefOrPoison() && !IsZero2 && NumBits2 != NumSrcBits))
       return false;
     if (Subtarget.hasSSE41() || BitSize == 8) {
       APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
-      if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
-          (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
+      if ((N1.isUndefOrPoison() || IsZero1 ||
+           DAG.MaskedValueIsZero(N1, ZeroMask)) &&
+          (N2.isUndefOrPoison() || IsZero2 ||
+           DAG.MaskedValueIsZero(N2, ZeroMask))) {
         V1 = N1;
         V2 = N2;
         SrcVT = PackVT;
@@ -10642,9 +10653,9 @@ static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
     }
     bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
     bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
-    if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
+    if ((N1.isUndefOrPoison() || IsZero1 || IsAllOnes1 ||
          DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
-        (N2.isUndef() || IsZero2 || IsAllOnes2 ||
+        (N2.isUndefOrPoison() || IsZero2 || IsAllOnes2 ||
          DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
       V1 = N1;
       V2 = N2;
@@ -10811,9 +10822,9 @@ static bool matchShuffleAsBlend(MVT VT, SDValue V1, SDValue V2,
                                 const APInt &Zeroable, bool &ForceV1Zero,
                                 bool &ForceV2Zero, uint64_t &BlendMask) {
   bool V1IsZeroOrUndef =
-      V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
+      V1.isUndefOrPoison() || ISD::isBuildVectorAllZeros(V1.getNode());
   bool V2IsZeroOrUndef =
-      V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
+      V2.isUndefOrPoison() || ISD::isBuildVectorAllZeros(V2.getNode());
 
   BlendMask = 0;
   ForceV1Zero = false, ForceV2Zero = false;
@@ -11111,9 +11122,9 @@ static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
     // Normalize the mask value depending on whether it's V1 or V2.
     int NormM = M;
     SDValue &Op = Ops[Elt & 1];
-    if (M < NumElts && (Op.isUndef() || Op == V1))
+    if (M < NumElts && (Op.isUndefOrPoison() || Op == V1))
       Op = V1;
-    else if (NumElts <= M && (Op.isUndef() || Op == V2)) {
+    else if (NumElts <= M && (Op.isUndefOrPoison() || Op == V2)) {
       Op = V2;
       NormM -= NumElts;
     } else
@@ -11182,7 +11193,7 @@ static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
   assert(Mask.size() >= 2 && "Single element masks are invalid.");
 
   // This routine only supports 128-bit integer dual input vectors.
-  if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndef())
+  if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndefOrPoison())
     return SDValue();
 
   int NumLoInputs =
@@ -13060,7 +13071,7 @@ static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // Check for being able to broadcast a single element.
     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
                                                     Mask, Subtarget, DAG))
@@ -13144,7 +13155,7 @@ static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // Check for being able to broadcast a single element.
     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
                                                     Mask, Subtarget, DAG))
@@ -14371,7 +14382,7 @@ static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
                                      SelectionDAG &DAG) {
   // Commute binary inputs so V2 is a load to simplify VPERMI2/T2 folds.
   SmallVector<int, 32> Mask(OriginalMask);
-  if (!V2.isUndef() && isShuffleFoldableLoad(V1) &&
+  if (!V2.isUndefOrPoison() && isShuffleFoldableLoad(V1) &&
       !isShuffleFoldableLoad(V2)) {
     ShuffleVectorSDNode::commuteMask(Mask);
     std::swap(V1, V2);
@@ -14399,7 +14410,7 @@ static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
   }
 
   SDValue Result;
-  if (V2.isUndef())
+  if (V2.isUndefOrPoison())
     Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
   else
     Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);
@@ -14588,7 +14599,7 @@ static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
     return V;
 
   // Check for compaction patterns.
-  bool IsSingleInput = V2.isUndef();
+  bool IsSingleInput = V2.isUndefOrPoison();
   int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
 
   // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
@@ -14943,7 +14954,8 @@ static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                           const APInt &Zeroable,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
-  assert(!V2.isUndef() && "This routine must not be used to lower single-input "
+  assert(!V2.isUndefOrPoison() &&
+         "This routine must not be used to lower single-input "
          "shuffles as it could then recurse on itself.");
   int Size = Mask.size();
 
@@ -15035,7 +15047,7 @@ static SDValue lowerShuffleAsLanePermuteAndPermute(
   int NumElts = VT.getVectorNumElements();
   int NumLanes = VT.getSizeInBits() / 128;
   int NumEltsPerLane = NumElts / NumLanes;
-  bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();
+  bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndefOrPoison();
 
   /// Attempts to find a sublane permute with the given size
   /// that gets all elements into their target lanes.
@@ -15194,7 +15206,7 @@ static SDValue lowerShuffleAsLanePermuteAndShuffle(
   }
 
   // TODO - we could support shuffling V2 in the Flipped input.
-  assert(V2.isUndef() &&
+  assert(V2.isUndefOrPoison() &&
          "This last part of this routine only works on single input shuffles");
 
   SmallVector<int> InLaneMask;
@@ -15224,7 +15236,7 @@ static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // Attempt to match VBROADCAST*128 subvector broadcast load.
     bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
     bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
@@ -15243,7 +15255,8 @@ static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
       return SDValue();
   }
 
-  bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
+  bool V2IsZero =
+      !V2.isUndefOrPoison() && ISD::isBuildVectorAllZeros(V2.getNode());
 
   SmallVector<int, 4> WidenedMask;
   if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
@@ -15342,7 +15355,7 @@ static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
-  assert(!V2.isUndef() && "This is only useful with multiple inputs.");
+  assert(!V2.isUndefOrPoison() && "This is only useful with multiple inputs.");
 
   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
     return SDValue();
@@ -15676,7 +15689,7 @@ static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
         // canonicalized to undef), then we can use vpermpd. Otherwise, we
         // are better off extracting the upper half of 1 operand and using a
         // narrow shuffle.
-        if (EltWidth == 64 && V2.isUndef())
+        if (EltWidth == 64 && V2.isUndefOrPoison())
           return SDValue();
       }
       // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
@@ -15901,7 +15914,7 @@ static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
     bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
     MinSubLaneScale = 2;
     MaxSubLaneScale =
-        (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
+        (!OnlyLowestElts && V2.isUndefOrPoison() && VT == MVT::v32i8) ? 4 : 2;
   }
   if (Subtarget.hasBWI() && VT == MVT::v64i8)
     MinSubLaneScale = MaxSubLaneScale = 4;
@@ -16119,7 +16132,7 @@ static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                      Subtarget, DAG))
     return V;
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // Check for being able to broadcast a single element.
     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
                                                     Mask, Subtarget, DAG))
@@ -16256,7 +16269,7 @@ static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 Subtarget, DAG, /*BitwiseOnly*/ true))
       return Shift;
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // When the shuffle is mirrored between the 128-bit lanes of the unit, we
     // can use lower latency instructions that will operate on both lanes.
     SmallVector<int, 2> RepeatedMask;
@@ -16383,7 +16396,7 @@ static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
 
-    if (V2.isUndef())
+    if (V2.isUndefOrPoison())
       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
 
@@ -16404,7 +16417,7 @@ static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
 
   // If we have a single input shuffle with different shuffle patterns in the
   // two 128-bit lanes use the variable mask to VPERMILPS.
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
       return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
@@ -16490,7 +16503,7 @@ static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   // For non-AVX512 if the Mask is of 16bit elements in lane then try to split
   // since after split we get a more efficient code than vblend by using
   // vpunpcklwd and vpunpckhwd instrs.
-  if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
+  if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndefOrPoison() &&
       !Subtarget.hasAVX512())
     return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG);
@@ -16524,7 +16537,7 @@ static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
       is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
   if (Is128BitLaneRepeatedShuffle) {
     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
-    if (V2.isUndef())
+    if (V2.isUndefOrPoison())
       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
 
@@ -16566,7 +16579,7 @@ static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
     return V;
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // Try to produce a fixed cross-128-bit lane permute followed by unpack
     // because that should be faster than the variable permute alternatives.
     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, V1, V2, Mask, DAG))
@@ -16660,7 +16673,7 @@ static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
     return V;
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // Try to use bit rotation instructions.
     if (SDValue Rotate =
             lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
@@ -16778,7 +16791,7 @@ static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
     return Rotate;
 
   // Try to use bit rotation instructions.
-  if (V2.isUndef())
+  if (V2.isUndefOrPoison())
     if (SDValue Rotate =
             lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
       return Rotate;
@@ -16791,7 +16804,8 @@ static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
 
   // There are no generalized cross-lane shuffle operations available on i8
   // element types.
-  if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
+  if (V2.isUndefOrPoison() &&
+      is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
     // Try to produce a fixed cross-128-bit lane permute followed by unpack
     // because that should be faster than the variable permute alternatives.
     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, V1, V2, Mask, DAG))
@@ -17014,7 +17028,7 @@ static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
 
     SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
     unsigned OpIndex = i / 2;
-    if (Ops[OpIndex].isUndef())
+    if (Ops[OpIndex].isUndefOrPoison())
       Ops[OpIndex] = Op;
     else if (Ops[OpIndex] != Op)
       return SDValue();
@@ -17035,7 +17049,7 @@ static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // Use low duplicate instructions for masks that match their pattern.
     if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
@@ -17101,7 +17115,7 @@ static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
 
-    if (V2.isUndef())
+    if (V2.isUndefOrPoison())
       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
 
@@ -17133,7 +17147,7 @@ static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
 
   // If we have a single input shuffle with different shuffle patterns in the
   // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
-  if (V2.isUndef() &&
+  if (V2.isUndefOrPoison() &&
       !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
     SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
     return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
@@ -17163,7 +17177,7 @@ static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 Subtarget, DAG, /*BitwiseOnly*/ true))
       return Shift;
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // When the shuffle is mirrored between the 128-bit lanes of the unit, we
     // can use lower latency instructions that will operate on all four
     // 128-bit lanes.
@@ -17258,7 +17272,7 @@ static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
       is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
   if (Is128BitLaneRepeatedShuffle) {
     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
-    if (V2.isUndef())
+    if (V2.isUndefOrPoison())
       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
 
@@ -17354,7 +17368,7 @@ static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                                 Subtarget, DAG))
     return Rotate;
 
-  if (V2.isUndef()) {
+  if (V2.isUndefOrPoison()) {
     // Try to use bit rotation instructions.
     if (SDValue Rotate =
             lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
@@ -17380,7 +17394,7 @@ static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
 
   // Try to simplify this by merging 128-bit lanes to enable a lane-based
   // shuffle.
-  if (!V2.isUndef())
+  if (!V2.isUndefOrPoison())
     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
             DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG))
       return Result;
@@ -17426,7 +17440,7 @@ static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
     return Rotate;
 
   // Try to use bit rotation instructions.
-  if (V2.isUndef())
+  if (V2.isUndefOrPoison())
     if (SDValue Rotate =
             lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
       return Rotate;
@@ -17470,7 +17484,7 @@ static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
 
   // Try to simplify this by merging 128-bit lanes to enable a lane-based
   // shuffle.
-  if (!V2.isUndef())
+  if (!V2.isUndefOrPoison())
     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
             DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
       return Result;
@@ -17566,7 +17580,7 @@ static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
                                          const X86Subtarget &Subtarget,
                                          SelectionDAG &DAG) {
   // Shuffle should be unary.
-  if (!V2.isUndef())
+  if (!V2.isUndefOrPoison())
     return SDValue();
 
   int ShiftAmt = -1;
@@ -17917,8 +17931,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
   assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
          "Can't lower MMX shuffles");
 
-  bool V1IsUndef = V1.isUndef();
-  bool V2IsUndef = V2.isUndef();
+  bool V1IsUndef = V1.isUndefOrPoison();
+  bool V2IsUndef = V2.isUndefOrPoison();
   if (V1IsUndef && V2IsUndef)
     return DAG.getUNDEF(VT);
 
@@ -18077,10 +18091,11 @@ static SDValue lowerVECTOR_COMPRESS(SDValue Op, const X86Subtarget &Subtarget,
                          DAG, DL);
     Mask = widenSubVector(LargeMaskVT, Mask, /*ZeroNewElements=*/true,
                           Subtarget, DAG, DL);
-    Passthru = Passthru.isUndef() ? DAG.getUNDEF(LargeVecVT)
-                                  : widenSubVector(LargeVecVT, Passthru,
-                                                   /*ZeroNewElements=*/false,
-                                                   Subtarget, DAG, DL);
+    Passthru =
+        Passthru.isUndefOrPoison()
+            ? DAG.getUNDEF(LargeVecVT)
+            : widenSubVector(LargeVecVT, Passthru,
+                             /*ZeroNewElements=*/false, Subtarget, DAG, DL);
 
     SDValue Compressed =
         DAG.getNode(ISD::VECTOR_COMPRESS, DL, LargeVecVT, Vec, Mask, Passthru);
@@ -18094,7 +18109,7 @@ static SDValue lowerVECTOR_COMPRESS(SDValue Op, const X86Subtarget &Subtarget,
     EVT LargeVecVT = MVT::getVectorVT(LageElementVT, NumElements);
 
     Vec = DAG.getNode(ISD::ANY_EXTEND, DL, LargeVecVT, Vec);
-    Passthru = Passthru.isUndef()
+    Passthru = Passthru.isUndefOrPoison()
                    ? DAG.getUNDEF(LargeVecVT)
                    : DAG.getNode(ISD::ANY_EXTEND, DL, LargeVecVT, Passthru);
 
@@ -20719,7 +20734,7 @@ static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
   std::tie(Lo, Hi) = splitVector(In, DAG, DL);
 
   // If Hi is undef, then don't bother packing it and widen the result instead.
-  if (Hi.isUndef()) {
+  if (Hi.isUndefOrPoison()) {
     EVT DstHalfVT = DstVT.getHalfNumVectorElementsVT(Ctx);
     if (SDValue Res =
             truncateVectorWithPACK(Opcode, DstHalfVT, Lo, DL, DAG, Subtarget))
@@ -25847,7 +25862,7 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
 
   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
 
-  if (PreservedSrc.isUndef())
+  if (PreservedSrc.isUndefOrPoison())
     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
   return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
 }
@@ -25880,7 +25895,7 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
       Op.getOpcode() == X86ISD::VFPCLASSS)
     return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
 
-  if (PreservedSrc.isUndef())
+  if (PreservedSrc.isUndefOrPoison())
     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
   return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
 }
@@ -26511,7 +26526,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
         return Op.getOperand(1);
 
       // Avoid false dependency.
-      if (PassThru.isUndef())
+      if (PassThru.isUndefOrPoison())
         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
 
       return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
@@ -26660,7 +26675,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
 
       // Break false dependency.
-      if (PassThru.isUndef())
+      if (PassThru.isUndefOrPoison())
         PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
 
       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
@@ -27020,7 +27035,7 @@ static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
   // If source is undef or we know it won't be used, use a zero vector
   // to break register dependency.
   // TODO: use undef instead and let BreakFalseDeps deal with it?
-  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
+  if (Src.isUndefOrPoison() || ISD::isBuildVectorAllOnes(Mask.getNode()))
     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
 
   // Cast mask to an integer type.
@@ -27061,7 +27076,7 @@ static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
   // If source is undef or we know it won't be used, use a zero vector
   // to break register dependency.
   // TODO: use undef instead and let BreakFalseDeps deal with it?
-  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
+  if (Src.isUndefOrPoison() || ISD::isBuildVectorAllOnes(Mask.getNode()))
     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
 
   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
@@ -28947,7 +28962,7 @@ static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
     if (Op->getOpcode() == ISD::BUILD_VECTOR ||
         Op->getOpcode() == ISD::SPLAT_VECTOR) {
       for (const SDValue &OpVal : Op->op_values()) {
-        if (OpVal.isUndef())
+        if (OpVal.isUndefOrPoison())
           continue;
         auto *CstOp = dyn_cast<ConstantFPSDNode>(OpVal);
         if (!CstOp)
@@ -30138,7 +30153,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
   if (ConstantAmt) {
     for (unsigned I = 0; I != NumElts; ++I) {
       SDValue A = Amt.getOperand(I);
-      if (A.isUndef() || A->getAsAPIntVal().uge(EltSizeInBits))
+      if (A.isUndefOrPoison() || A->getAsAPIntVal().uge(EltSizeInBits))
         continue;
       unsigned CstAmt = A->getAsAPIntVal().getZExtValue();
       if (UniqueCstAmt.count(CstAmt)) {
@@ -30215,19 +30230,20 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
       for (unsigned SrcI = 0, E = AmtWideElts.size(); SrcI != E; SrcI += 2) {
         unsigned DstI = SrcI / 2;
         // Both elements are undef? Make a note and keep going.
-        if (AmtWideElts[SrcI].isUndef() && AmtWideElts[SrcI + 1].isUndef()) {
+        if (AmtWideElts[SrcI].isUndefOrPoison() &&
+            AmtWideElts[SrcI + 1].isUndefOrPoison()) {
           TmpAmtWideElts[DstI] = AmtWideElts[SrcI];
           continue;
         }
         // Even element is undef? We will shift it by the same shift amount as
         // the odd element.
-        if (AmtWideElts[SrcI].isUndef()) {
+        if (AmtWideElts[SrcI].isUndefOrPoison()) {
           TmpAmtWideElts[DstI] = AmtWideElts[SrcI + 1];
           continue;
         }
         // Odd element is undef? We will shift it by the same shift amount as
         // the even element.
-        if (AmtWideElts[SrcI + 1].isUndef()) {
+        if (AmtWideElts[SrcI + 1].isUndefOrPoison()) {
           TmpAmtWideElts[DstI] = AmtWideElts[SrcI];
           continue;
         }
@@ -32715,7 +32731,7 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
   if (InVT == NVT)
     return InOp;
 
-  if (InOp.isUndef())
+  if (InOp.isUndefOrPoison())
     return DAG.getUNDEF(NVT);
 
   assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
@@ -32730,7 +32746,7 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
   if (InOp.getOpcode() == ISD::CONCAT_VECTORS && InOp.getNumOperands() == 2) {
     SDValue N1 = InOp.getOperand(1);
     if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
-        N1.isUndef()) {
+        N1.isUndefOrPoison()) {
       InOp = InOp.getOperand(0);
       InVT = InOp.getSimpleValueType();
       InNumElts = InVT.getVectorNumElements();
@@ -32828,7 +32844,8 @@ static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
   // Handle AVX masked loads which don't support passthru other than 0.
   if (MaskVT.getVectorElementType() != MVT::i1) {
     // We also allow undef in the isel pattern.
-    if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
+    if (PassThru.isUndefOrPoison() ||
+        ISD::isBuildVectorAllZeros(PassThru.getNode()))
       return Op;
 
     SDValue NewLoad = DAG.getMaskedLoad(
@@ -32962,7 +32979,7 @@ static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
   }
 
   // Break dependency on the data register.
-  if (PassThru.isUndef())
+  if (PassThru.isUndefOrPoison())
     PassThru = getZeroVector(VT, Subtarget, DAG, dl);
 
   SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
@@ -33135,8 +33152,9 @@ SDValue X86TargetLowering::visitMaskedLoad(
   EVT VTy = PassThru.getValueType();
   EVT Ty = VTy.getVectorElementType();
   SDVTList Tys = DAG.getVTList(Ty, MVT::Other);
-  auto ScalarPassThru = PassThru.isUndef() ? DAG.getConstant(0, DL, Ty)
-                                           : DAG.getBitcast(Ty, PassThru);
+  auto ScalarPassThru = PassThru.isUndefOrPoison()
+                            ? DAG.getConstant(0, DL, Ty)
+                            : DAG.getBitcast(Ty, PassThru);
   auto Flags = getFlagsOfCmpZeroFori1(DAG, DL, Mask);
   auto COND_NE = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
   SDValue Ops[] = {Chain, Ptr, ScalarPassThru, COND_NE, Flags};
@@ -37986,7 +38004,7 @@ X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
       if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
         return false;
       for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
-        if (!DemandedElts[i] || V.getOperand(i).isUndef())
+        if (!DemandedElts[i] || V.getOperand(i).isUndefOrPoison())
           continue;
         const APInt &Val = V.getConstantOperandAPInt(i);
         if (Val.getBitWidth() > Val.getNumSignBits() &&
@@ -39585,7 +39603,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
 
         SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
         unsigned OpIndex = i / 2;
-        if (Ops[OpIndex].isUndef())
+        if (Ops[OpIndex].isUndefOrPoison())
           Ops[OpIndex] = Op;
         else if (Ops[OpIndex] != Op)
           return SDValue();
@@ -40236,7 +40254,7 @@ static SDValue combineX86ShuffleChainWithExtract(
         continue;
       }
       if (Input.getOpcode() == ISD::INSERT_SUBVECTOR &&
-          Input.getOperand(0).isUndef()) {
+          Input.getOperand(0).isUndefOrPoison()) {
         Input = peekThroughBitcasts(Input.getOperand(1));
         continue;
       }
@@ -40289,7 +40307,7 @@ static SDValue combineX86ShuffleChainWithExtract(
       }
       // TODO: Handle insertions into upper subvectors.
       if (Input.getOpcode() == ISD::INSERT_SUBVECTOR &&
-          Input.getOperand(0).isUndef() &&
+          Input.getOperand(0).isUndefOrPoison() &&
           isNullConstant(Input.getOperand(2))) {
         Input = peekThroughBitcasts(Input.getOperand(1));
         continue;
@@ -40924,7 +40942,8 @@ static SDValue combineX86ShufflesRecursively(
   // TODO: Can resolveTargetShuffleInputsAndMask do some of this?
   for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
     SDValue &Op = Ops[I];
-    if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
+    if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
+        Op.getOperand(0).isUndefOrPoison() &&
         isNullConstant(Op.getOperand(2))) {
       Op = Op.getOperand(1);
       unsigned Scale = RootSizeInBits / Op.getValueSizeInBits();
@@ -41663,13 +41682,13 @@ static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
   EVT SrcVT0 = Src0.getValueType();
   EVT SrcVT1 = Src1.getValueType();
 
-  if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
+  if (!Src1.isUndefOrPoison() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
     return SDValue();
 
   switch (SrcOpc0) {
   case X86ISD::MOVDDUP: {
     SDValue LHS = Src0.getOperand(0);
-    SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
+    SDValue RHS = Src1.isUndefOrPoison() ? Src1 : Src1.getOperand(0);
     SDValue Res =
         DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
     Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
@@ -41687,9 +41706,9 @@ static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
   case X86ISD::VSRLI:
   case X86ISD::VSRAI:
   case X86ISD::PSHUFD:
-    if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
+    if (Src1.isUndefOrPoison() || Src0.getOperand(1) == Src1.getOperand(1)) {
       SDValue LHS = Src0.getOperand(0);
-      SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
+      SDValue RHS = Src1.isUndefOrPoison() ? Src1 : Src1.getOperand(0);
       SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
                                 V.getOperand(2));
       Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
@@ -42036,7 +42055,8 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
     if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
       SDValue V = peekThroughOneUseBitcasts(N0);
 
-      if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
+      if (V.getOpcode() == ISD::INSERT_SUBVECTOR &&
+          V.getOperand(0).isUndefOrPoison() &&
           isNullConstant(V.getOperand(2))) {
         SDValue In = V.getOperand(1);
         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
@@ -42193,9 +42213,9 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
     SDValue LHS = N->getOperand(0);
     SDValue RHS = N->getOperand(1);
     if (LHS.getOpcode() == ISD::BITCAST &&
-        (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
+        (RHS.getOpcode() == ISD::BITCAST || RHS.isUndefOrPoison())) {
       EVT SrcVT = LHS.getOperand(0).getValueType();
-      if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
+      if (RHS.isUndefOrPoison() || SrcVT == RHS.getOperand(0).getValueType()) {
         return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
                                               DAG.getBitcast(SrcVT, LHS),
                                               DAG.getBitcast(SrcVT, RHS),
@@ -42308,12 +42328,12 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
     unsigned ZeroMask = InsertPSMask & 0xF;
 
     // If we zero out all elements from Op0 then we don't need to reference it.
-    if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
+    if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndefOrPoison())
       return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
 
     // If we zero out the element from Op1 then we don't need to reference it.
-    if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
+    if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndefOrPoison())
       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
 
@@ -42733,8 +42753,8 @@ static SDValue combineShuffleOfConcatUndef(SDNode *N, const SDLoc &DL,
   // Check that both sources are concats with undef.
   if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
       N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
-      N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
-      !N1.getOperand(1).isUndef())
+      N1.getNumOperands() != 2 || !N0.getOperand(1).isUndefOrPoison() ||
+      !N1.getOperand(1).isUndefOrPoison())
     return SDValue();
 
   // Construct the new shuffle mask. Elements from the first source retain their
@@ -44628,7 +44648,7 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
     SDValue LowerOp = SubSrcOps[0];
     ArrayRef<SDValue> UpperOps(std::next(SubSrcOps.begin()), SubSrcOps.end());
     if (LowerOp.getOpcode() == ISD::SETCC &&
-        all_of(UpperOps, [](SDValue Op) { return Op.isUndef(); })) {
+        all_of(UpperOps, [](SDValue Op) { return Op.isUndefOrPoison(); })) {
       EVT SubVT = VT.getIntegerVT(
           *DAG.getContext(), LowerOp.getValueType().getVectorMinNumElements());
       if (SDValue V = combineBitcastvxi1(DAG, SubVT, LowerOp, DL, Subtarget)) {
@@ -44736,7 +44756,7 @@ static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
   APInt Imm(SrcVT.getVectorNumElements(), 0);
   for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
     SDValue In = Op.getOperand(Idx);
-    if (!In.isUndef() && (In->getAsZExtVal() & 0x1))
+    if (!In.isUndefOrPoison() && (In->getAsZExtVal() & 0x1))
       Imm.setBit(Idx);
   }
   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
@@ -44807,7 +44827,7 @@ static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
 
   // Build MMX element from integer GPR or SSE float values.
   auto CreateMMXElement = [&](SDValue V) {
-    if (V.isUndef())
+    if (V.isUndefOrPoison())
       return DAG.getUNDEF(MVT::x86mmx);
     if (V.getValueType().isFloatingPoint()) {
       if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
@@ -44829,7 +44849,7 @@ static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
 
   // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
   if (Splat) {
-    if (Splat.isUndef())
+    if (Splat.isUndefOrPoison())
       return DAG.getUNDEF(MVT::x86mmx);
 
     Splat = CreateMMXElement(Splat);
@@ -45105,7 +45125,7 @@ static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
       bool LowUndef = true, AllUndefOrZero = true;
       for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
         SDValue Op = N0.getOperand(i);
-        LowUndef &= Op.isUndef() || (i >= e/2);
+        LowUndef &= Op.isUndefOrPoison() || (i >= e / 2);
         AllUndefOrZero &= isNullConstantOrUndef(Op);
       }
       if (AllUndefOrZero) {
@@ -48250,7 +48270,7 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
     bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
     bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
     // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
-    if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
+    if (IsAnyOf && CmpBits == 8 && VecOp1.isUndefOrPoison()) {
       SDLoc DL(EFLAGS);
       SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
@@ -49602,8 +49622,8 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
   // Constant Folding.
   APInt UndefElts0, UndefElts1;
   SmallVector<APInt, 32> EltBits0, EltBits1;
-  if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
-      (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
+  if ((N0.isUndefOrPoison() || N->isOnlyUserOf(N0.getNode())) &&
+      (N1.isUndefOrPoison() || N->isOnlyUserOf(N1.getNode())) &&
       getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0,
                                     /*AllowWholeUndefs*/ true,
                                     /*AllowPartialUndefs*/ true) &&
@@ -49660,10 +49680,10 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
   // Try to fold PACKSS(NOT(X),NOT(Y)) -> NOT(PACKSS(X,Y)).
   // Currently limit this to allsignbits cases only.
   if (IsSigned &&
-      (N0.isUndef() || DAG.ComputeNumSignBits(N0) == SrcBitsPerElt) &&
-      (N1.isUndef() || DAG.ComputeNumSignBits(N1) == SrcBitsPerElt)) {
-    SDValue Not0 = N0.isUndef() ? N0 : IsNOT(N0, DAG);
-    SDValue Not1 = N1.isUndef() ? N1 : IsNOT(N1, DAG);
+      (N0.isUndefOrPoison() || DAG.ComputeNumSignBits(N0) == SrcBitsPerElt) &&
+      (N1.isUndefOrPoison() || DAG.ComputeNumSignBits(N1) == SrcBitsPerElt)) {
+    SDValue Not0 = N0.isUndefOrPoison() ? N0 : IsNOT(N0, DAG);
+    SDValue Not1 = N1.isUndefOrPoison() ? N1 : IsNOT(N1, DAG);
     if (Not0 && Not1) {
       SDLoc DL(N);
       MVT SrcVT = N0.getSimpleValueType();
@@ -49676,8 +49696,8 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
 
   // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
   // truncate to create a larger truncate.
-  if (Subtarget.hasAVX512() &&
-      N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
+  if (Subtarget.hasAVX512() && N0.getOpcode() == ISD::TRUNCATE &&
+      N1.isUndefOrPoison() && VT == MVT::v16i8 &&
       N0.getOperand(0).getValueType() == MVT::v8i32) {
     if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
         (!IsSigned &&
@@ -49707,7 +49727,7 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
         N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
       Src1 = N1.getOperand(0);
     }
-    if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
+    if ((Src0 || N0.isUndefOrPoison()) && (Src1 || N1.isUndefOrPoison())) {
       assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
       Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
       Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
@@ -49717,7 +49737,7 @@ static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
     // Try again with pack(*_extend_vector_inreg, undef).
     unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
                                     : ISD::ZERO_EXTEND_VECTOR_INREG;
-    if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
+    if (N0.getOpcode() == VecInRegOpc && N1.isUndefOrPoison() &&
         N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
       return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
                                     DAG);
@@ -49752,12 +49772,12 @@ static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
       SDValue LHS1 = LHS.getOperand(1);
       SDValue RHS0 = RHS.getOperand(0);
       SDValue RHS1 = RHS.getOperand(1);
-      if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
-          (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
+      if ((LHS0 == LHS1 || LHS0.isUndefOrPoison() || LHS1.isUndefOrPoison()) &&
+          (RHS0 == RHS1 || RHS0.isUndefOrPoison() || RHS1.isUndefOrPoison())) {
         SDLoc DL(N);
         SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
-                                  LHS0.isUndef() ? LHS1 : LHS0,
-                                  RHS0.isUndef() ? RHS1 : RHS0);
+                                  LHS0.isUndefOrPoison() ? LHS1 : LHS0,
+                                  RHS0.isUndefOrPoison() ? RHS1 : RHS0);
         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
         Res = DAG.getBitcast(ShufVT, Res);
         SDValue NewLHS =
@@ -49829,7 +49849,7 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
   assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
 
   // (shift undef, X) -> 0
-  if (N0.isUndef())
+  if (N0.isUndefOrPoison())
     return DAG.getConstant(0, SDLoc(N), VT);
 
   // Out of range logical bit shifts are guaranteed to be zero.
@@ -49980,7 +50000,8 @@ static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
   SDValue Idx = N->getOperand(2);
 
   // Fold insert_vector_elt(undef, elt, 0) --> scalar_to_vector(elt).
-  if (Opcode == ISD::INSERT_VECTOR_ELT && Vec.isUndef() && isNullConstant(Idx))
+  if (Opcode == ISD::INSERT_VECTOR_ELT && Vec.isUndefOrPoison() &&
+      isNullConstant(Idx))
     return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Scl);
 
   if (Opcode == X86ISD::PINSRB || Opcode == X86ISD::PINSRW) {
@@ -50159,12 +50180,12 @@ static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
     // end-users are ISD::AND including cases
     // (and(extract_vector_element(SVN), Y)).
     if (!SVN || !SVN->hasOneUse() || !SVN->isSplat() ||
-        !SVN->getOperand(1).isUndef()) {
+        !SVN->getOperand(1).isUndefOrPoison()) {
       return SDValue();
     }
     SDValue IVEN = SVN->getOperand(0);
     if (IVEN.getOpcode() != ISD::INSERT_VECTOR_ELT ||
-        !IVEN.getOperand(0).isUndef() || !IVEN.hasOneUse())
+        !IVEN.getOperand(0).isUndefOrPoison() || !IVEN.hasOneUse())
       return SDValue();
     if (!isa<ConstantSDNode>(IVEN.getOperand(2)) ||
         IVEN.getConstantOperandAPInt(2) != SVN->getSplatIndex())
@@ -52438,7 +52459,7 @@ static int getOneTrueElt(SDValue V) {
   unsigned NumElts = BV->getValueType(0).getVectorNumElements();
   for (unsigned i = 0; i < NumElts; ++i) {
     const SDValue &Op = BV->getOperand(i);
-    if (Op.isUndef())
+    if (Op.isUndefOrPoison())
       continue;
     auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
     if (!ConstNode)
@@ -52558,7 +52579,7 @@ combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
 
   // Don't try this if the pass-through operand is already undefined. That would
   // cause an infinite loop because that's what we're about to create.
-  if (ML->getPassThru().isUndef())
+  if (ML->getPassThru().isUndefOrPoison())
     return SDValue();
 
   if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
@@ -53078,7 +53099,7 @@ static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
                               SmallVectorImpl<int> &PostShuffleMask,
                               bool ForceHorizOp) {
   // If either operand is undef, bail out. The binop should be simplified.
-  if (LHS.isUndef() || RHS.isUndef())
+  if (LHS.isUndefOrPoison() || RHS.isUndefOrPoison())
     return false;
 
   // Look for the following pattern:
@@ -53870,7 +53891,7 @@ static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
   case ISD::VECTOR_SHUFFLE: {
     // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
     // of this is VECTOR_SHUFFLE(-VEC1, UNDEF).  The mask can be anything here.
-    if (!Op.getOperand(1).isUndef())
+    if (!Op.getOperand(1).isUndefOrPoison())
       return SDValue();
     if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
       if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
@@ -53883,7 +53904,7 @@ static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
     // -V, INDEX).
     SDValue InsVector = Op.getOperand(0);
     SDValue InsVal = Op.getOperand(1);
-    if (!InsVector.isUndef())
+    if (!InsVector.isUndefOrPoison())
       return SDValue();
     if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
       if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
@@ -54260,7 +54281,8 @@ static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
   // Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
   if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
       VT.getVectorElementType() == MVT::i1 &&
-      N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
+      N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      N0.getOperand(0).isUndefOrPoison() &&
       TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
     return DAG.getNode(
         ISD::INSERT_SUBVECTOR, DL, VT, N0.getOperand(0),
@@ -54634,7 +54656,7 @@ static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
 
   // ANDNP(undef, x) -> 0
   // ANDNP(x, undef) -> 0
-  if (N0.isUndef() || N1.isUndef())
+  if (N0.isUndefOrPoison() || N1.isUndefOrPoison())
     return DAG.getConstant(0, DL, VT);
 
   // ANDNP(0, x) -> x
@@ -55174,7 +55196,7 @@ static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {
     if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
       Ops.push_back(DAG.getConstantFP(-Cst->getValueAPF(), SDLoc(Op), EltVT));
     } else {
-      assert(Op.isUndef());
+      assert(Op.isUndefOrPoison());
       Ops.push_back(DAG.getUNDEF(EltVT));
     }
   }
@@ -55370,8 +55392,8 @@ static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
     SDValue N01 = N0.getOperand(1);
     unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
     APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
-    if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
-        (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
+    if ((N00.isUndefOrPoison() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
+        (N01.isUndefOrPoison() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
       return concatSubVectors(N00, N01, DAG, dl);
     }
   }
@@ -57306,7 +57328,7 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
   assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
   unsigned EltSizeInBits = VT.getScalarSizeInBits();
 
-  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
+  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndefOrPoison(); }))
     return DAG.getUNDEF(VT);
 
   if (llvm::all_of(Ops, [](SDValue Op) {
@@ -58073,12 +58095,13 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
   uint64_t IdxVal = N->getConstantOperandVal(2);
   MVT SubVecVT = SubVec.getSimpleValueType();
 
-  if (Vec.isUndef() && SubVec.isUndef())
+  if (Vec.isUndefOrPoison() && SubVec.isUndefOrPoison())
     return DAG.getUNDEF(OpVT);
 
   // Inserting undefs/zeros into zeros/undefs is a zero vector.
-  if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
-      (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
+  if ((Vec.isUndefOrPoison() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
+      (SubVec.isUndefOrPoison() ||
+       ISD::isBuildVectorAllZeros(SubVec.getNode())))
     return getZeroVector(OpVT, Subtarget, DAG, dl);
 
   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
@@ -58121,7 +58144,8 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
   // TODO: This is a more general version of a DAGCombiner fold, can we move it
   // there?
   if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
-      SubVec.getOperand(0).isUndef() && isNullConstant(SubVec.getOperand(2)))
+      SubVec.getOperand(0).isUndefOrPoison() &&
+      isNullConstant(SubVec.getOperand(2)))
     return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec,
                        SubVec.getOperand(1), N->getOperand(2));
 
@@ -58130,7 +58154,7 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
   if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
       SubVec.getOperand(0).getSimpleValueType() == OpVT &&
       (IdxVal != 0 ||
-       !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
+       !(Vec.isUndefOrPoison() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
     int ExtIdxVal = SubVec.getConstantOperandVal(1);
     if (ExtIdxVal != 0) {
       int VecNumElts = OpVT.getVectorNumElements();
@@ -58176,12 +58200,13 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
   }
 
   // If this is a broadcast insert into an upper undef, use a larger broadcast.
-  if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
+  if (Vec.isUndefOrPoison() && IdxVal != 0 &&
+      SubVec.getOpcode() == X86ISD::VBROADCAST)
     return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
 
   // If this is a broadcast load inserted into an upper undef, use a larger
   // broadcast load.
-  if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
+  if (Vec.isUndefOrPoison() && IdxVal != 0 && SubVec.hasOneUse() &&
       SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
     auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
     SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
@@ -58403,7 +58428,7 @@ static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
       return true;
     if (ISD::isBuildVectorOfConstantFPSDNodes(V.getNode()))
       return true;
-    return V.isUndef();
+    return V.isUndefOrPoison();
   };
 
   // If we're extracting the lowest subvector and we're the only user,



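The X86 call-site changes above are a mechanical rename from V.isUndef() to
V.isUndefOrPoison(). As a rough illustration of the semantics those call sites
assume, here is a sketch, not the patch's exact code: the method name is taken
from the call sites and the opcodes from this PR, but the precise signature and
placement in SelectionDAGNodes.h are assumptions.

  // Sketch only: true if the value is an existing UNDEF node or one of the
  // new POISON nodes this PR introduces. Call sites above treat both the
  // same way, e.g. skipping such operands or folding them away in shuffle
  // lowering and DAG combines.
  bool SDValue::isUndefOrPoison() const {
    unsigned Opc = getNode()->getOpcode();
    return Opc == ISD::UNDEF || Opc == ISD::POISON;
  }

With that split, every undef-tolerant fold keeps applying to poison operands as
well, presumably leaving a stricter isUndef() to match only ISD::UNDEF.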