[llvm] [RFC][SelectionDAG] Add and use SDNode::getAsConstantVal() helper (PR #76710)
Alex Bradbury via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 2 06:21:45 PST 2024
https://github.com/asb updated https://github.com/llvm/llvm-project/pull/76710
From e4ef2c11499d2769529ffe2793c560247b1978e1 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Tue, 2 Jan 2024 11:27:24 +0000
Subject: [PATCH 1/2] [SelectionDAG] Add and use SDNode::getAsConstantVal()
helper
This follows on from #76708, allowing
`cast<ConstantSDNode>(N)->getZExtValue()` to be replaced with just
`N->getAsConstantVal()`.
The changes were introduced via `git grep -l "cast<ConstantSDNode>\(.*\).*getZExtValue" |
xargs sed -E -i
's/cast<ConstantSDNode>\((.*)\)->getZExtValue/\1->getAsConstantVal/'`
and then cleaned up by running `git clang-format` on the result.
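
For illustration, a condensed sketch of what the patch adds and how call
sites change (taken from the hunks below; `Idx` stands for any
SDValue/SDNode already known to wrap a ConstantSDNode):

  // New inline helper on SDNode (SelectionDAGNodes.h). The cast<> asserts
  // that this node really is a ConstantSDNode, as before.
  uint64_t SDNode::getAsConstantVal() const {
    return cast<ConstantSDNode>(this)->getZExtValue();
  }

  // Call sites go from:
  uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  // to:
  uint64_t IdxVal = Idx->getAsConstantVal();
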
---
llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 5 +++
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 +-
.../lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 4 +--
.../SelectionDAG/LegalizeFloatTypes.cpp | 2 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 2 +-
.../SelectionDAG/LegalizeVectorTypes.cpp | 10 +++---
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 17 +++++-----
.../SelectionDAG/SelectionDAGBuilder.cpp | 34 ++++++++-----------
.../SelectionDAG/SelectionDAGDumper.cpp | 2 +-
.../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 15 ++++----
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 6 ++--
.../Target/AArch64/AArch64ISelLowering.cpp | 10 +++---
.../AArch64/AArch64SelectionDAGInfo.cpp | 2 +-
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 8 ++---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 ++--
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 4 +--
llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 25 +++++++-------
llvm/lib/Target/ARM/ARMISelLowering.cpp | 19 +++++------
llvm/lib/Target/AVR/AVRISelLowering.cpp | 4 +--
.../Target/Hexagon/HexagonISelDAGToDAG.cpp | 2 +-
.../Target/Hexagon/HexagonISelLoweringHVX.cpp | 6 ++--
.../LoongArch/LoongArchISelLowering.cpp | 19 +++++------
llvm/lib/Target/M68k/M68kISelLowering.cpp | 4 +--
llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp | 4 +--
llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 6 ++--
llvm/lib/Target/Mips/MipsISelLowering.cpp | 3 +-
llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 8 ++---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 2 +-
llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 14 ++++----
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 16 ++++-----
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 19 +++++------
.../Target/SystemZ/SystemZISelDAGToDAG.cpp | 8 ++---
.../Target/SystemZ/SystemZISelLowering.cpp | 27 ++++++---------
.../WebAssembly/WebAssemblyISelLowering.cpp | 7 ++--
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 24 ++++++-------
llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp | 2 +-
40 files changed, 167 insertions(+), 189 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 5c44538fe69974..d13badd83f5493 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -923,6 +923,7 @@ END_TWO_BYTE_PACK()
/// Helper method returns the integer value of a ConstantSDNode operand.
inline uint64_t getConstantOperandVal(unsigned Num) const;
+ inline uint64_t getAsConstantVal() const;
/// Helper method returns the APInt of a ConstantSDNode operand.
inline const APInt &getConstantOperandAPInt(unsigned Num) const;
@@ -1640,6 +1641,10 @@ uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
}
+uint64_t SDNode::getAsConstantVal() const {
+ return cast<ConstantSDNode>(this)->getZExtValue();
+}
+
const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index eafa95ce7fcf71..9a41531ce9fba6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14709,7 +14709,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
- int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ int Elt = EltNo->getAsConstantVal();
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDLoc DL(N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 34fa1f5a7ed1fb..248f26ce5cecb1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -551,7 +551,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
SDValue N2 = Node->getOperand(2);
- unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
+ unsigned SubIdx = N2->getAsConstantVal();
// Figure out the register class to create for the destreg. It should be
// the largest legal register class supporting SubIdx sub-registers.
@@ -650,7 +650,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
// Skip physical registers as they don't have a vreg to get and we'll
// insert copies for them in TwoAddressInstructionPass anyway.
if (!R || !R->getReg().isPhysical()) {
- unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
+ unsigned SubIdx = Op->getAsConstantVal();
unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
const TargetRegisterClass *SRC =
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 65919a64b80653..00522535daca1f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -2490,7 +2490,7 @@ SDValue DAGTypeLegalizer::PromoteFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
EVT VecVT = Vec->getValueType(0);
EVT EltVT = VecVT.getVectorElementType();
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
switch (getTypeAction(VecVT)) {
default: break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 3d21bd22e6ef5d..5a848fce42b689 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -5557,7 +5557,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
getTypeAction(InVT) == TargetLowering::TypeLegal) {
EVT NInVT = InVT.getHalfNumVectorElementsVT(*DAG.getContext());
unsigned NElts = NInVT.getVectorMinNumElements();
- uint64_t IdxVal = cast<ConstantSDNode>(BaseIdx)->getZExtValue();
+ uint64_t IdxVal = BaseIdx->getAsConstantVal();
SDValue Step1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NInVT, InOp0,
DAG.getConstant(alignDown(IdxVal, NElts), dl,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 66461b26468f79..475f6fba60ec9a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1442,7 +1442,7 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
Hi = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
DAG.getVectorIdxConstant(IdxVal + LoVT.getVectorMinNumElements(), dl));
@@ -1466,7 +1466,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
// If we know the index is in the first half, and we know the subvector
// doesn't cross the boundary between the halves, we can avoid spilling the
// vector, and insert into the lower half of the split vector directly.
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsConstantVal();
if (IdxVal + SubElems <= LoElems) {
Lo = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, LoVT, Lo, SubVec, Idx);
return;
@@ -3279,7 +3279,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_INSERT_SUBVECTOR(SDNode *N,
SDValue Lo, Hi;
GetSplitVector(SubVec, Lo, Hi);
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
SDValue FirstInsertion =
@@ -3301,7 +3301,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
GetSplitVector(N->getOperand(0), Lo, Hi);
uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements();
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
if (IdxVal < LoEltsMin) {
assert(IdxVal + SubVT.getVectorMinNumElements() <= LoEltsMin &&
@@ -5257,7 +5257,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
EVT InVT = InOp.getValueType();
// Check if we can just return the input vector after widening.
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
if (IdxVal == 0 && InVT == WidenVT)
return InOp;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index eb4deb6306fd5f..91a0caad55b19e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7193,8 +7193,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
assert(isa<ConstantSDNode>(N3) &&
"Insert subvector index must be constant");
assert((VT.isScalableVector() != N2VT.isScalableVector() ||
- (N2VT.getVectorMinNumElements() +
- cast<ConstantSDNode>(N3)->getZExtValue()) <=
+ (N2VT.getVectorMinNumElements() + N3->getAsConstantVal()) <=
VT.getVectorMinNumElements()) &&
"Insert subvector overflow!");
assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() ==
@@ -9978,13 +9977,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
VTList.VTs[0].getVectorElementCount() ==
Ops[1].getValueType().getVectorElementCount()) &&
"Vector element count mismatch!");
- assert(VTList.VTs[0].isFloatingPoint() &&
- Ops[1].getValueType().isFloatingPoint() &&
- VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
- isa<ConstantSDNode>(Ops[2]) &&
- (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
- cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
- "Invalid STRICT_FP_ROUND!");
+ assert(
+ VTList.VTs[0].isFloatingPoint() &&
+ Ops[1].getValueType().isFloatingPoint() &&
+ VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
+ isa<ConstantSDNode>(Ops[2]) &&
+ (Ops[2]->getAsConstantVal() == 0 || Ops[2]->getAsConstantVal() == 1) &&
+ "Invalid STRICT_FP_ROUND!");
break;
#if 0
// FIXME: figure out how to safely handle things like
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 3c4b285cb06747..ca37b3da065d1c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5642,7 +5642,7 @@ static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
// expansion/promotion) if it was possible to expand a libcall of an
// illegal type during operation legalization. But it's not, so things
// get a bit hacky.
- unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
+ unsigned ScaleInt = Scale->getAsConstantVal();
if ((ScaleInt > 0 || (Saturating && Signed)) &&
(TLI.isTypeLegal(VT) ||
(VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
@@ -7655,8 +7655,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// suitable for the target. Convert the index as required.
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
if (Index.getValueType() != VectorIdxTy)
- Index = DAG.getVectorIdxConstant(
- cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
+ Index = DAG.getVectorIdxConstant(Index->getAsConstantVal(), sdl);
EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
@@ -7672,8 +7671,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// suitable for the target. Convert the index as required.
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
if (Index.getValueType() != VectorIdxTy)
- Index = DAG.getVectorIdxConstant(
- cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
+ Index = DAG.getVectorIdxConstant(Index->getAsConstantVal(), sdl);
setValue(&I,
DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
@@ -8136,7 +8134,7 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
case ISD::VP_IS_FPCLASS: {
const DataLayout DLayout = DAG.getDataLayout();
EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
- auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
+ auto Constant = OpValues[1]->getAsConstantVal();
SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
{OpValues[0], Check, OpValues[2], OpValues[3]});
@@ -9173,8 +9171,7 @@ findMatchingInlineAsmOperand(unsigned OperandNo,
unsigned CurOp = InlineAsm::Op_FirstOperand;
for (; OperandNo; --OperandNo) {
// Advance to the next operand.
- unsigned OpFlag =
- cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
+ unsigned OpFlag = AsmNodeOperands[CurOp]->getAsConstantVal();
const InlineAsm::Flag F(OpFlag);
assert(
(F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
@@ -9480,8 +9477,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
// just use its register.
auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
AsmNodeOperands);
- InlineAsm::Flag Flag(
- cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue());
+ InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsConstantVal());
if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
if (OpInfo.isIndirect) {
// This happens on gcc/testsuite/gcc.dg/pr8788-1.c
@@ -9985,14 +9981,14 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
// constant nodes.
SDValue ID = getValue(CI.getArgOperand(0));
assert(ID.getValueType() == MVT::i64);
- SDValue IDConst = DAG.getTargetConstant(
- cast<ConstantSDNode>(ID)->getZExtValue(), DL, ID.getValueType());
+ SDValue IDConst =
+ DAG.getTargetConstant(ID->getAsConstantVal(), DL, ID.getValueType());
Ops.push_back(IDConst);
SDValue Shad = getValue(CI.getArgOperand(1));
assert(Shad.getValueType() == MVT::i32);
- SDValue ShadConst = DAG.getTargetConstant(
- cast<ConstantSDNode>(Shad)->getZExtValue(), DL, Shad.getValueType());
+ SDValue ShadConst =
+ DAG.getTargetConstant(Shad->getAsConstantVal(), DL, Shad.getValueType());
Ops.push_back(ShadConst);
// Add the live variables.
@@ -10041,7 +10037,7 @@ void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
// Get the real number of arguments participating in the call <numArgs>
SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
- unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
+ unsigned NumArgs = NArgVal->getAsConstantVal();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
// Intrinsics include all meta-operands up to but not including CC.
@@ -10088,12 +10084,10 @@ void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
// Add the <id> and <numBytes> constants.
SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
+ Ops.push_back(DAG.getTargetConstant(IDVal->getAsConstantVal(), dl, MVT::i64));
SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
- MVT::i32));
+ Ops.push_back(
+ DAG.getTargetConstant(NBytesVal->getAsConstantVal(), dl, MVT::i32));
// Add the callee.
Ops.push_back(Callee);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 78cc60084068a5..93fe7f49c6aee3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -149,7 +149,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
- unsigned IID = cast<ConstantSDNode>(getOperand(OpNo))->getZExtValue();
+ unsigned IID = getOperand(OpNo)->getAsConstantVal();
if (IID < Intrinsic::num_intrinsics)
return Intrinsic::getBaseName((Intrinsic::ID)IID).str();
if (!G)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index f28211ac113ca9..72b03e00fd90ae 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2121,7 +2121,7 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
--e; // Don't process a glue operand if it is here.
while (i != e) {
- InlineAsm::Flag Flags(cast<ConstantSDNode>(InOps[i])->getZExtValue());
+ InlineAsm::Flag Flags(InOps[i]->getAsConstantVal());
if (!Flags.isMemKind() && !Flags.isFuncKind()) {
// Just skip over this operand, copying the operands verbatim.
Ops.insert(Ops.end(), InOps.begin() + i,
@@ -2135,12 +2135,10 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
if (Flags.isUseOperandTiedToDef(TiedToOperand)) {
// We need the constraint ID from the operand this is tied to.
unsigned CurOp = InlineAsm::Op_FirstOperand;
- Flags =
- InlineAsm::Flag(cast<ConstantSDNode>(InOps[CurOp])->getZExtValue());
+ Flags = InlineAsm::Flag(InOps[CurOp]->getAsConstantVal());
for (; TiedToOperand; --TiedToOperand) {
CurOp += Flags.getNumOperandRegisters() + 1;
- Flags = InlineAsm::Flag(
- cast<ConstantSDNode>(InOps[CurOp])->getZExtValue());
+ Flags = InlineAsm::Flag(InOps[CurOp]->getAsConstantVal());
}
}
@@ -2380,9 +2378,8 @@ void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
if (OpNode->getOpcode() == ISD::Constant) {
Ops.push_back(
CurDAG->getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
- Ops.push_back(
- CurDAG->getTargetConstant(cast<ConstantSDNode>(OpNode)->getZExtValue(),
- DL, OpVal.getValueType()));
+ Ops.push_back(CurDAG->getTargetConstant(OpNode->getAsConstantVal(), DL,
+ OpVal.getValueType()));
} else {
Ops.push_back(OpVal);
}
@@ -2452,7 +2449,7 @@ void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
Ops.push_back(*It++);
// Push the args for the call.
- for (uint64_t I = cast<ConstantSDNode>(NumArgs)->getZExtValue(); I != 0; I--)
+ for (uint64_t I = NumArgs->getAsConstantVal(); I != 0; I--)
Ops.push_back(*It++);
// Now push the live variables.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 476d99c2a7e045..e28258b7025316 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -580,7 +580,7 @@ bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
if (!isa<ConstantSDNode>(N.getNode()))
return false;
- uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
+ uint64_t Immed = N.getNode()->getAsConstantVal();
unsigned ShiftAmt;
if (Immed >> 12 == 0) {
@@ -611,7 +611,7 @@ bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
return false;
// The immediate operand must be a 24-bit zero-extended immediate.
- uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
+ uint64_t Immed = N.getNode()->getAsConstantVal();
// This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
// have the opposite effect on the C flag, so this pattern mustn't match under
@@ -1326,7 +1326,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
// MOV X0, WideImmediate
// LDR X2, [BaseReg, X0]
if (isa<ConstantSDNode>(RHS)) {
- int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
+ int64_t ImmOff = (int64_t)RHS->getAsConstantVal();
// Skip the immediate can be selected by load/store addressing mode.
// Also skip the immediate can be encoded by a single ADD (SUB is also
// checked by using -ImmOff).
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 26f013a18f3827..37d0ef90554e1d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3589,7 +3589,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// can be turned into:
// cmp w12, w11, lsl #1
if (!isa<ConstantSDNode>(RHS) ||
- !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) {
+ !isLegalArithImmed(RHS->getAsConstantVal())) {
SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
@@ -3623,7 +3623,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
LHS.getNode()->hasNUsesOfValue(1, 0)) {
- int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
+ int16_t ValueofRHS = RHS->getAsConstantVal();
if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
SDValue SExt =
DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
@@ -5619,7 +5619,7 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
// SVE supports an index scaled by sizeof(MemVT.elt) only, everything else
// must be calculated before hand.
- uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleVal = Scale->getAsConstantVal();
if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
EVT IndexVT = Index.getValueType();
@@ -5707,7 +5707,7 @@ SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
// SVE supports an index scaled by sizeof(MemVT.elt) only, everything else
// must be calculated before hand.
- uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleVal = Scale->getAsConstantVal();
if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
EVT IndexVT = Index.getValueType();
@@ -22011,7 +22011,7 @@ static SDValue performBRCONDCombine(SDNode *N,
SDValue Cmp = N->getOperand(3);
assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
- unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
+ unsigned CC = CCVal->getAsConstantVal();
if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
return SDValue();
diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index 1a76f354589eee..3415b533047d1a 100644
--- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -172,7 +172,7 @@ static SDValue EmitUnrolledSetTag(SelectionDAG &DAG, const SDLoc &dl,
SDValue AArch64SelectionDAGInfo::EmitTargetCodeForSetTag(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr,
SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const {
- uint64_t ObjSize = cast<ConstantSDNode>(Size)->getZExtValue();
+ uint64_t ObjSize = Size->getAsConstantVal();
assert(ObjSize % 16 == 0);
MachineFunction &MF = DAG.getMachineFunction();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 28604af2cdb3ce..3a78024dd79c2d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -382,7 +382,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
Subtarget->getRegisterInfo()->getRegClass(RCID);
SDValue SubRegOp = N->getOperand(OpNo + 1);
- unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
+ unsigned SubRegIdx = SubRegOp->getAsConstantVal();
return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
SubRegIdx);
}
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 9a2fb0bc37b2c0..6b43e47dc8a378 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1651,7 +1651,7 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ unsigned Idx = Swz[i]->getAsConstantVal();
if (SwizzleRemap.contains(Idx))
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
@@ -1659,7 +1659,7 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
SwizzleRemap.clear();
BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ unsigned Idx = Swz[i]->getAsConstantVal();
if (SwizzleRemap.contains(Idx))
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
@@ -1780,7 +1780,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
// Check that we know which element is being inserted
if (!isa<ConstantSDNode>(EltNo))
return SDValue();
- unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ unsigned Elt = EltNo->getAsConstantVal();
// Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
@@ -2021,7 +2021,7 @@ bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx,
}
case R600::MOV_IMM_GLOBAL_ADDR:
// Check if the Imm slot is used. Taken from below.
- if (cast<ConstantSDNode>(Imm)->getZExtValue())
+ if (Imm->getAsConstantVal())
return false;
Imm = Src.getOperand(0);
Src = DAG.getRegister(R600::ALU_LITERAL_X, MVT::i32);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0e857e6ac71b61..4c16229e517912 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6424,7 +6424,7 @@ SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
EVT InsVT = Ins.getValueType();
EVT EltVT = VecVT.getVectorElementType();
unsigned InsNumElts = InsVT.getVectorNumElements();
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsConstantVal();
SDLoc SL(Op);
if (EltVT.getScalarSizeInBits() == 16 && IdxVal % 2 == 0) {
@@ -7453,7 +7453,7 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
Ops.push_back(IsA16 ? True : False);
if (!Subtarget->hasGFX90AInsts()) {
Ops.push_back(TFE); //tfe
- } else if (cast<ConstantSDNode>(TFE)->getZExtValue()) {
+ } else if (TFE->getAsConstantVal()) {
report_fatal_error("TFE is not supported on this GPU");
}
if (!IsGFX12Plus || BaseOpcode->Sampler || BaseOpcode->MSAA)
@@ -7590,7 +7590,7 @@ SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
setBufferOffsets(Offset, DAG, &Ops[3],
NumLoads > 1 ? Align(16 * NumLoads) : Align(4));
- uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
+ uint64_t InstOffset = Ops[5]->getAsConstantVal();
for (unsigned i = 0; i < NumLoads; ++i) {
Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index aae6f2e842fd1a..4a8b80c2852bf7 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -338,8 +338,8 @@ bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
return false;
- Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
- Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
+ Offset0 = Off0->getAsConstantVal();
+ Offset1 = Off1->getAsConstantVal();
return true;
}
diff --git a/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp b/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
index 28e35f8f2a548a..aec01c002ea291 100644
--- a/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
@@ -170,7 +170,7 @@ bool ARCDAGToDAGISel::SelectFrameADDR_ri(SDValue Addr, SDValue &Base,
void ARCDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
case ISD::Constant: {
- uint64_t CVal = cast<ConstantSDNode>(N)->getZExtValue();
+ uint64_t CVal = N->getAsConstantVal();
ReplaceNode(N, CurDAG->getMachineNode(
isInt<12>(CVal) ? ARC::MOV_rs12 : ARC::MOV_rlimm,
SDLoc(N), MVT::i32,
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index adc429b61bbcce..29dad62d197034 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -372,7 +372,7 @@ INITIALIZE_PASS(ARMDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsConstantVal();
return true;
}
return false;
@@ -1101,8 +1101,8 @@ bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
Offset = N.getOperand(0);
SDValue N1 = N.getOperand(1);
- Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
- SDLoc(N), MVT::i32);
+ Label =
+ CurDAG->getTargetConstant(N1->getAsConstantVal(), SDLoc(N), MVT::i32);
return true;
}
@@ -1942,7 +1942,7 @@ SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
if (!is64BitVector && NumVecs < 3)
NumRegs *= 2;
- unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ unsigned Alignment = Align->getAsConstantVal();
if (Alignment >= 32 && NumRegs == 4)
Alignment = 32;
else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
@@ -2428,7 +2428,7 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
unsigned Alignment = 0;
if (NumVecs != 3) {
- Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ Alignment = Align->getAsConstantVal();
unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
@@ -2871,7 +2871,7 @@ void ARMDAGToDAGISel::SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
Ops.push_back(N->getOperand(OpIdx++)); // limit
SDValue ImmOp = N->getOperand(OpIdx++); // step
- int ImmValue = cast<ConstantSDNode>(ImmOp)->getZExtValue();
+ int ImmValue = ImmOp->getAsConstantVal();
Ops.push_back(getI32Imm(ImmValue, Loc));
if (Predicated)
@@ -2892,7 +2892,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
// Convert and append the immediate operand designating the coprocessor.
SDValue ImmCorpoc = N->getOperand(OpIdx++);
- uint32_t ImmCoprocVal = cast<ConstantSDNode>(ImmCorpoc)->getZExtValue();
+ uint32_t ImmCoprocVal = ImmCorpoc->getAsConstantVal();
Ops.push_back(getI32Imm(ImmCoprocVal, Loc));
// For accumulating variants copy the low and high order parts of the
@@ -2911,7 +2911,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
// Convert and append the immediate operand
SDValue Imm = N->getOperand(OpIdx);
- uint32_t ImmVal = cast<ConstantSDNode>(Imm)->getZExtValue();
+ uint32_t ImmVal = Imm->getAsConstantVal();
Ops.push_back(getI32Imm(ImmVal, Loc));
// Accumulating variants are IT-predicable, add predicate operands.
@@ -2965,7 +2965,7 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
unsigned Alignment = 0;
if (NumVecs != 3) {
- Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ Alignment = Align->getAsConstantVal();
unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
@@ -3697,7 +3697,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
// Other cases are autogenerated.
break;
case ISD::Constant: {
- unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
+ unsigned Val = N->getAsConstantVal();
// If we can't materialize the constant we need to use a literal pool
if (ConstantMaterializationCost(Val, Subtarget) > 2 &&
!Subtarget->genExecuteOnly()) {
@@ -4132,7 +4132,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
assert(N2.getOpcode() == ISD::Constant);
assert(N3.getOpcode() == ISD::Register);
- unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
+ unsigned CC = (unsigned)N2->getAsConstantVal();
if (InGlue.getOpcode() == ARMISD::CMPZ) {
if (InGlue.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
@@ -4243,8 +4243,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
if (SwitchEQNEToPLMI) {
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
switch (CC) {
default: llvm_unreachable("CMPZ must be either NE or EQ!");
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index cf9646a0b81ed0..cce000eeef15c3 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -4820,8 +4820,7 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// some tweaks to the heuristics for the previous and->shift transform.
// FIXME: Optimize cases where the LHS isn't a shift.
if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
+ isa<ConstantSDNode>(RHS) && RHS->getAsConstantVal() == 0x80000000U &&
CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
LHS.getConstantOperandVal(1) < 31) {
unsigned ShiftAmt = LHS.getConstantOperandVal(1) + 1;
@@ -5533,7 +5532,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
// Choose GE over PL, which vsel does now support
- if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
+ if (ARMcc->getAsConstantVal() == ARMCC::PL)
ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
}
@@ -7749,7 +7748,7 @@ static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
uint64_t Val;
if (!isa<ConstantSDNode>(N))
return SDValue();
- Val = cast<ConstantSDNode>(N)->getZExtValue();
+ Val = N->getAsConstantVal();
if (ST->isThumb1Only()) {
if (Val <= 255 || ~Val <= 255)
@@ -7804,7 +7803,7 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
SDValue V = Op.getOperand(i);
if (!isa<ConstantSDNode>(V) && !V.isUndef())
continue;
- bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
+ bool BitSet = V.isUndef() ? false : V->getAsConstantVal();
if (BitSet)
Bits32 |= BoolMask << (i * BitsPerBool);
}
@@ -9240,7 +9239,7 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
EVT VT = Op.getValueType();
EVT Op1VT = V1.getValueType();
unsigned NumElts = VT.getVectorNumElements();
- unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue();
+ unsigned Index = V2->getAsConstantVal();
assert(VT.getScalarSizeInBits() == 1 &&
"Unexpected custom EXTRACT_SUBVECTOR lowering");
@@ -14618,7 +14617,7 @@ static SDValue PerformORCombineToBFI(SDNode *N,
// Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
// where lsb(mask) == #shamt and masked bits of B are known zero.
SDValue ShAmt = N00.getOperand(1);
- unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
+ unsigned ShAmtC = ShAmt->getAsConstantVal();
unsigned LSB = llvm::countr_zero(Mask);
if (ShAmtC != LSB)
return SDValue();
@@ -18339,8 +18338,7 @@ ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
SDValue Chain = N->getOperand(0);
SDValue BB = N->getOperand(1);
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
// (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
// -> (brcond Chain BB CC CPSR Cmp)
@@ -18373,8 +18371,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
SDValue FalseVal = N->getOperand(0);
SDValue TrueVal = N->getOperand(1);
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
// BFI is only available on V6T2+.
if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index d36bfb188ed360..be0cec36eebdf3 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -660,7 +660,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
SDValue Cmp;
if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
- uint64_t Imm = cast<ConstantSDNode>(RHS)->getZExtValue();
+ uint64_t Imm = RHS->getAsConstantVal();
// Generate a CPI/CPC pair if RHS is a 16-bit constant. Use the zero
// register for the constant RHS if its lower or higher byte is zero.
SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
@@ -680,7 +680,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
} else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
// Generate a CPI/CPC pair if LHS is a 16-bit constant. Use the zero
// register for the constant LHS if its lower or higher byte is zero.
- uint64_t Imm = cast<ConstantSDNode>(LHS)->getZExtValue();
+ uint64_t Imm = LHS->getAsConstantVal();
SDValue LHSlo = (Imm & 0xff) == 0
? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
: DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index eb5c59672224e9..eeb18056709a34 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -743,7 +743,7 @@ void HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
//
void HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
if (N->getValueType(0) == MVT::i1) {
- assert(!(cast<ConstantSDNode>(N)->getZExtValue() >> 1));
+ assert(!(N->getAsConstantVal() >> 1));
unsigned Opc = (cast<ConstantSDNode>(N)->getSExtValue() != 0)
? Hexagon::PS_true
: Hexagon::PS_false;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index 665e2d79c83d10..d84c0939a66bfc 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -1256,7 +1256,7 @@ HexagonTargetLowering::extractHvxSubvectorReg(SDValue OrigOp, SDValue VecV,
SDValue IdxV, const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const {
MVT VecTy = ty(VecV);
unsigned HwLen = Subtarget.getVectorLength();
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsConstantVal();
MVT ElemTy = VecTy.getVectorElementType();
unsigned ElemWidth = ElemTy.getSizeInBits();
@@ -1299,7 +1299,7 @@ HexagonTargetLowering::extractHvxSubvectorPred(SDValue VecV, SDValue IdxV,
MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);
// IdxV is required to be a constant.
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsConstantVal();
unsigned ResLen = ResTy.getVectorNumElements();
unsigned BitBytes = HwLen / VecTy.getVectorNumElements();
@@ -1801,7 +1801,7 @@ HexagonTargetLowering::LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG)
MVT SrcTy = ty(SrcV);
MVT DstTy = ty(Op);
SDValue IdxV = Op.getOperand(1);
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsConstantVal();
assert(Idx % DstTy.getVectorNumElements() == 0);
(void)Idx;
const SDLoc &dl(Op);
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index e14bbadf9ed227..a776511fb9f493 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -525,8 +525,7 @@ LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
if (isa<ConstantSDNode>(Idx) &&
(EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
- EltTy == MVT::f64 ||
- cast<ConstantSDNode>(Idx)->getZExtValue() < NumElts / 2))
+ EltTy == MVT::f64 || Idx->getAsConstantVal() < NumElts / 2))
return Op;
return SDValue();
@@ -1383,28 +1382,28 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA32, DAG);
// call void @llvm.loongarch.cacop.[d/w](uimm5, rj, simm12)
- unsigned Imm1 = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm1 = Op2->getAsConstantVal();
int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
return Op;
}
case Intrinsic::loongarch_dbar: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::DBAR, DL, MVT::Other, Chain,
DAG.getConstant(Imm, DL, GRLenVT));
}
case Intrinsic::loongarch_ibar: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::IBAR, DL, MVT::Other, Chain,
DAG.getConstant(Imm, DL, GRLenVT));
}
case Intrinsic::loongarch_break: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::BREAK, DL, MVT::Other, Chain,
@@ -1413,7 +1412,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
case Intrinsic::loongarch_movgr2fcsr: {
if (!Subtarget.hasBasicF())
return emitIntrinsicErrorMessage(Op, ErrorMsgReqF, DAG);
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<2>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::MOVGR2FCSR, DL, MVT::Other, Chain,
@@ -1422,7 +1421,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
Op.getOperand(3)));
}
case Intrinsic::loongarch_syscall: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::SYSCALL, DL, MVT::Other, Chain,
@@ -1925,7 +1924,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqF);
return;
}
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
if (!isUInt<2>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
@@ -1981,7 +1980,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
CSR_CASE(iocsrrd_d);
#undef CSR_CASE
case Intrinsic::loongarch_csrrd_w: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
if (!isUInt<14>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index c4d7a0dec7f390..b26ed5c4749de6 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -2375,7 +2375,7 @@ SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == M68kISD::SUB) {
- unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
+ unsigned CondCode = CC->getAsConstantVal();
if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
@@ -2491,7 +2491,7 @@ SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
Cond = Cmp;
AddTest = false;
} else {
- switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
+ switch (CC->getAsConstantVal()) {
default:
break;
case M68k::COND_VS:
diff --git a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 660861a5d52182..b4cee26cc09b33 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -308,12 +308,12 @@ static bool isValidIndexedLoad(const LoadSDNode *LD) {
switch (VT.getSimpleVT().SimpleTy) {
case MVT::i8:
- if (cast<ConstantSDNode>(LD->getOffset())->getZExtValue() != 1)
+ if (LD->getOffset()->getAsConstantVal() != 1)
return false;
break;
case MVT::i16:
- if (cast<ConstantSDNode>(LD->getOffset())->getZExtValue() != 2)
+ if (LD->getOffset()->getAsConstantVal() != 2)
return false;
break;
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index d3b59138a5a95e..3174d613cc38bf 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -1168,8 +1168,8 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
bool Invert = false;
bool Shift = false;
bool Convert = true;
- switch (cast<ConstantSDNode>(TargetCC)->getZExtValue()) {
- default:
+ switch (TargetCC->getAsConstantVal()) {
+ default:
Convert = false;
break;
case MSP430CC::COND_HS:
@@ -1193,7 +1193,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
// C = ~Z for AND instruction, thus we can put Res = ~(SR & 1), however,
// Res = (SR >> 1) & 1 is 1 word shorter.
break;
- }
+ }
EVT VT = Op.getValueType();
SDValue One = DAG.getConstant(1, dl, VT);
if (Convert) {
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 483eba4e4f4794..301eb3169ef2f9 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2042,8 +2042,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
return Op;
SDValue CCNode = CondRes.getOperand(2);
- Mips::CondCode CC =
- (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
+ Mips::CondCode CC = (Mips::CondCode)CCNode->getAsConstantVal();
unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 0ed87ee0809a3e..7b386b024803a8 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -76,7 +76,7 @@ void MipsSEDAGToDAGISel::addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
}
unsigned MipsSEDAGToDAGISel::getMSACtrlReg(const SDValue RegIdx) const {
- uint64_t RegNum = cast<ConstantSDNode>(RegIdx)->getZExtValue();
+ uint64_t RegNum = RegIdx->getAsConstantVal();
return Mips::MSACtrlRegClass.getRegister(RegNum);
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 815c46edb6fa2a..2c9966696ed156 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -2076,7 +2076,7 @@ bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) {
VTs = CurDAG->getVTList(EVTs);
}
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsConstantVal();
SmallVector<SDValue, 2> Ops;
Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
@@ -2091,7 +2091,7 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Offset = N->getOperand(1);
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsConstantVal();
MemSDNode *Mem = cast<MemSDNode>(N);
// How many elements do we have?
@@ -2158,9 +2158,9 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Param = N->getOperand(1);
- unsigned ParamVal = cast<ConstantSDNode>(Param)->getZExtValue();
+ unsigned ParamVal = Param->getAsConstantVal();
SDValue Offset = N->getOperand(2);
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsConstantVal();
MemSDNode *Mem = cast<MemSDNode>(N);
SDValue Glue = N->getOperand(N->getNumOperands() - 1);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index e8f36bf50a1b08..693e75bdd6ff1c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5811,7 +5811,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SDLoc DL(N);
// Get the intrinsic ID
- unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
+ unsigned IntrinNo = Intrin.getNode()->getAsConstantVal();
switch (IntrinNo) {
default:
return;
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index ed96339240d926..35b0f0a706480d 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -565,7 +565,7 @@ static bool hasTocDataAttr(SDValue Val, unsigned PointerSize) {
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsConstantVal();
return true;
}
return false;
@@ -575,7 +575,7 @@ static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
/// operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsConstantVal();
return true;
}
return false;
@@ -1500,7 +1500,7 @@ static SDNode *selectI64Imm(SelectionDAG *CurDAG, SDNode *N) {
SDLoc dl(N);
// Get 64 bit value.
- int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ int64_t Imm = N->getAsConstantVal();
if (unsigned MinSize = allUsesTruncate(CurDAG, N)) {
uint64_t SextImm = SignExtend64(Imm, MinSize);
SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
@@ -4923,7 +4923,7 @@ bool PPCDAGToDAGISel::trySelectLoopCountIntrinsic(SDNode *N) {
SDNode *NewDecrement = CurDAG->getMachineNode(DecrementOpcode, DecrementLoc,
MVT::i1, DecrementOps);
- unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
+ unsigned Val = RHS->getAsConstantVal();
bool IsBranchOnTrue = (CC == ISD::SETEQ && Val) || (CC == ISD::SETNE && !Val);
unsigned Opcode = IsBranchOnTrue ? PPC::BC : PPC::BCn;
@@ -5765,7 +5765,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
break;
// If the multiplier fits int16, we can handle it with mulli.
- int64_t Imm = cast<ConstantSDNode>(Op1)->getZExtValue();
+ int64_t Imm = Op1->getAsConstantVal();
unsigned Shift = llvm::countr_zero<uint64_t>(Imm);
if (isInt<16>(Imm) || !Shift)
break;
@@ -6612,8 +6612,8 @@ void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
// For us to materialize these using one instruction, we must be able to
// represent them as signed 16-bit integers.
- uint64_t True = cast<ConstantSDNode>(TrueRes)->getZExtValue(),
- False = cast<ConstantSDNode>(FalseRes)->getZExtValue();
+ uint64_t True = TrueRes->getAsConstantVal(),
+ False = FalseRes->getAsConstantVal();
if (!isInt<16>(True) || !isInt<16>(False))
break;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 8f27e6677afa5e..5c865dae018e17 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2566,7 +2566,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
if (LeadingZero) {
if (!UniquedVals[Multiple-1].getNode())
return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
- int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
+ int Val = UniquedVals[Multiple - 1]->getAsConstantVal();
if (Val < 16) // 0,0,0,4 -> vspltisw(4)
return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
}
@@ -2635,11 +2635,11 @@ bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = (int16_t)N->getAsConstantVal();
if (N->getValueType(0) == MVT::i32)
- return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
+ return Imm == (int32_t)N->getAsConstantVal();
else
- return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
+ return Imm == (int64_t)N->getAsConstantVal();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
return isIntS16Immediate(Op.getNode(), Imm);
@@ -2684,7 +2684,7 @@ bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = (int64_t)N->getAsConstantVal();
return isInt<34>(Imm);
}
bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
@@ -15580,7 +15580,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
NarrowOp.getOpcode() != ISD::ROTL && NarrowOp.getOpcode() != ISD::ROTR)
break;
- uint64_t Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ uint64_t Imm = Op2->getAsConstantVal();
// Make sure that the constant is narrow enough to fit in the narrow type.
if (!isUInt<32>(Imm))
break;
@@ -16795,7 +16795,7 @@ void PPCTargetLowering::CollectTargetIntrinsicOperands(const CallInst &I,
return;
if (!isa<ConstantSDNode>(Ops[1].getNode()))
return;
- auto IntrinsicID = cast<ConstantSDNode>(Ops[1].getNode())->getZExtValue();
+ auto IntrinsicID = Ops[1].getNode()->getAsConstantVal();
if (IntrinsicID != Intrinsic::ppc_tdw && IntrinsicID != Intrinsic::ppc_tw &&
IntrinsicID != Intrinsic::ppc_trapd && IntrinsicID != Intrinsic::ppc_trap)
return;
@@ -18430,7 +18430,7 @@ PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
if (Flags & PPC::MOF_RPlusSImm16) {
SDValue Op0 = N.getOperand(0);
SDValue Op1 = N.getOperand(1);
- int16_t Imm = cast<ConstantSDNode>(Op1)->getZExtValue();
+ int16_t Imm = Op1->getAsConstantVal();
if (!Align || isAligned(*Align, Imm)) {
Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
Base = Op0;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 03a59f8a8b57ca..76bcdf98e89b55 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3489,7 +3489,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (unsigned I = 0; I < NumElts;) {
SDValue V = Op.getOperand(I);
- bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
+ bool BitValue = !V.isUndef() && V->getAsConstantVal();
Bits |= ((uint64_t)BitValue << BitPos);
++BitPos;
++I;
@@ -3620,7 +3620,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (const auto &OpIdx : enumerate(Op->op_values())) {
const auto &SeqV = OpIdx.value();
if (!SeqV.isUndef())
- SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
+ SplatValue |= ((SeqV->getAsConstantVal() & EltMask)
<< (OpIdx.index() * EltBitSize));
}
@@ -3676,8 +3676,8 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
// vector type.
for (const auto &SeqV : Sequence) {
if (!SeqV.isUndef())
- SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
- << (EltIdx * EltBitSize));
+ SplatValue |=
+ ((SeqV->getAsConstantVal() & EltMask) << (EltIdx * EltBitSize));
EltIdx++;
}
@@ -3938,8 +3938,7 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
(isa<RegisterSDNode>(VL) &&
cast<RegisterSDNode>(VL)->getReg() == RISCV::X0))
NewVL = DAG.getRegister(RISCV::X0, MVT::i32);
- else if (isa<ConstantSDNode>(VL) &&
- isUInt<4>(cast<ConstantSDNode>(VL)->getZExtValue()))
+ else if (isa<ConstantSDNode>(VL) && isUInt<4>(VL->getAsConstantVal()))
NewVL = DAG.getNode(ISD::ADD, DL, VL.getValueType(), VL, VL);
if (NewVL) {
@@ -7906,8 +7905,7 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
// Use tail agnostic policy if Idx is the last index of Vec.
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) &&
- cast<ConstantSDNode>(Idx)->getZExtValue() + 1 ==
- VecVT.getVectorNumElements())
+ Idx->getAsConstantVal() + 1 == VecVT.getVectorNumElements())
Policy = RISCVII::TAIL_AGNOSTIC;
SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
Idx, Mask, InsertVL, Policy);
@@ -8167,7 +8165,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
const auto [MinVLMAX, MaxVLMAX] =
RISCVTargetLowering::computeVLMAXBounds(VT, Subtarget);
- uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
+ uint64_t AVLInt = AVL->getAsConstantVal();
if (AVLInt <= MinVLMAX) {
I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
} else if (AVLInt >= 2 * MaxVLMAX) {
@@ -8233,8 +8231,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
SDValue Mask = Operands[NumOps - 3];
SDValue MaskedOff = Operands[1];
// Assume Policy operand is the last operand.
- uint64_t Policy =
- cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
+ uint64_t Policy = Operands[NumOps - 1]->getAsConstantVal();
// We don't need to select maskedoff if it's undef.
if (MaskedOff.isUndef())
return Vec;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index c7d8591c5bdf6f..c0767f6ae4cd78 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -1641,7 +1641,7 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
// If this is a 64-bit constant that is out of the range of LLILF,
// LLIHF and LGFI, split it into two 32-bit pieces.
if (Node->getValueType(0) == MVT::i64) {
- uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
+ uint64_t Val = Node->getAsConstantVal();
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
uint32_t(Val));
@@ -1677,10 +1677,8 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
SDValue CCValid = Node->getOperand(2);
SDValue CCMask = Node->getOperand(3);
- uint64_t ConstCCValid =
- cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
- uint64_t ConstCCMask =
- cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
+ uint64_t ConstCCValid = CCValid.getNode()->getAsConstantVal();
+ uint64_t ConstCCMask = CCMask.getNode()->getAsConstantVal();
// Invert the condition.
CCMask = CurDAG->getTargetConstant(ConstCCValid ^ ConstCCMask,
SDLoc(Node), CCMask.getValueType());
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 045c4c0aac07ae..744951933ed879 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2662,10 +2662,8 @@ static void adjustForFNeg(Comparison &C) {
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
// Check for a comparison between (shl X, 32) and 0.
- if (C.Op0.getOpcode() == ISD::SHL &&
- C.Op0.getValueType() == MVT::i64 &&
- C.Op1.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
+ C.Op1.getOpcode() == ISD::Constant && C.Op1->getAsConstantVal() == 0) {
auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
if (C1 && C1->getZExtValue() == 32) {
SDValue ShlOp0 = C.Op0.getOperand(0);
@@ -2690,7 +2688,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ C.Op1->getAsConstantVal() == 0) {
auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
C.Op0.getValueSizeInBits().getFixedValue()) {
@@ -3035,12 +3033,12 @@ static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid,
- cast<ConstantSDNode>(CmpOp1)->getZExtValue(), Cond);
+ CmpOp1->getAsConstantVal(), Cond);
if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid,
- cast<ConstantSDNode>(CmpOp1)->getZExtValue(), Cond);
+ CmpOp1->getAsConstantVal(), Cond);
}
Comparison C(CmpOp0, CmpOp1, Chain);
C.CCMask = CCMaskForCondCode(Cond);
@@ -3457,12 +3455,11 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
// Check for absolute and negative-absolute selections, including those
// where the comparison value is sign-extended (for LPGFR and LNGFR).
// This check supplements the one in DAGCombiner.
- if (C.Opcode == SystemZISD::ICMP &&
- C.CCMask != SystemZ::CCMASK_CMP_EQ &&
+ if (C.Opcode == SystemZISD::ICMP && C.CCMask != SystemZ::CCMASK_CMP_EQ &&
C.CCMask != SystemZ::CCMASK_CMP_NE &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ C.Op1->getAsConstantVal() == 0) {
if (isAbsolute(C.Op0, TrueOp, FalseOp))
return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
if (isAbsolute(C.Op0, FalseOp, TrueOp))
@@ -3947,8 +3944,7 @@ SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
// If user has set the no alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal =
- (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
+ uint64_t AlignVal = (RealignOpt ? Align->getAsConstantVal() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
@@ -4013,8 +4009,7 @@ SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
// If user has set the no alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal =
- (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
+ uint64_t AlignVal = (RealignOpt ? Align->getAsConstantVal() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
@@ -4213,7 +4208,7 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// If the low part is a constant that is outside the range of LHI,
// then we're better off using IILF.
if (LowOp.getOpcode() == ISD::Constant) {
- int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
+ int64_t Value = int32_t(LowOp->getAsConstantVal());
if (!isInt<16>(Value))
return Op;
}
@@ -5897,7 +5892,7 @@ SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
Op1.getOpcode() != ISD::BITCAST &&
Op1.getOpcode() != ISD::ConstantFP &&
Op2.getOpcode() == ISD::Constant) {
- uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
+ uint64_t Index = Op2->getAsConstantVal();
unsigned Mask = VT.getVectorNumElements() - 1;
if (Index <= Mask)
return Op;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 4bcf89690505ed..71c5430f1f9efa 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1869,8 +1869,7 @@ SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
Ops[OpIdx++] = Op.getOperand(2);
while (OpIdx < 18) {
const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
- if (MaskIdx.isUndef() ||
- cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
+ if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsConstantVal() >= 32) {
bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
} else {
@@ -1912,7 +1911,7 @@ WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
const SDNode *Index = Extract.getOperand(1).getNode();
if (!isa<ConstantSDNode>(Index))
return SDValue();
- unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
+ unsigned IndexVal = Index->getAsConstantVal();
unsigned Scale =
ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
assert(Scale > 1);
@@ -2335,7 +2334,7 @@ WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
if (isa<ConstantSDNode>(IdxNode)) {
// Ensure the index type is i32 to match the tablegen patterns
- uint64_t Idx = cast<ConstantSDNode>(IdxNode)->getZExtValue();
+ uint64_t Idx = IdxNode->getAsConstantVal();
SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
Ops[Op.getNumOperands() - 1] =
DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 846eab93e1fea4..5e8eb37af8bfab 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2852,7 +2852,7 @@ bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
SDValue &Index, SDValue &Disp,
SDValue &Segment) {
X86ISelAddressMode AM;
- AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
+ AM.Scale = ScaleOp->getAsConstantVal();
// Attempt to match index patterns, as long as we're not relying on implicit
// sign-extension, which is performed BEFORE scale.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1e4b1361f98a6f..12afc563f80464 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7371,7 +7371,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
/// index.
static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
SDValue ExtIdx) {
- int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
+ int Idx = ExtIdx->getAsConstantVal();
if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
return Idx;
@@ -8793,7 +8793,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
- unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
+ unsigned InsertC = InsIndex->getAsConstantVal();
unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
if (InsertC < NumEltsInLow128Bits)
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
@@ -17747,7 +17747,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Vec), Idx));
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsConstantVal();
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
DAG.getTargetConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
@@ -24061,7 +24061,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == X86ISD::SUB) {
- unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
+ unsigned CondCode = CC->getAsConstantVal();
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
@@ -25359,8 +25359,8 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
Src3.getValueType() != MVT::i8) {
- Src3 = DAG.getTargetConstant(
- cast<ConstantSDNode>(Src3)->getZExtValue() & 0xff, dl, MVT::i8);
+ Src3 =
+ DAG.getTargetConstant(Src3->getAsConstantVal() & 0xff, dl, MVT::i8);
}
// We specify 2 possible opcodes for intrinsics with rounding modes.
@@ -25385,8 +25385,8 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
SDValue Src4 = Op.getOperand(4);
if (Src4.getValueType() != MVT::i8) {
- Src4 = DAG.getTargetConstant(
- cast<ConstantSDNode>(Src4)->getZExtValue() & 0xff, dl, MVT::i8);
+ Src4 =
+ DAG.getTargetConstant(Src4->getAsConstantVal() & 0xff, dl, MVT::i8);
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
@@ -26788,7 +26788,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
{Chain, Op1, Op2, Size}, VT, MMO);
Chain = Res.getValue(1);
Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
if (Imm)
Res = DAG.getNode(ISD::SHL, DL, VT, Res,
DAG.getShiftAmountConstant(Imm, VT, DL));
@@ -41795,7 +41795,7 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
- unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
+ unsigned ShAmt = Op1->getAsConstantVal();
if (ShAmt >= BitWidth)
break;
@@ -42580,7 +42580,7 @@ static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
APInt Imm(SrcVT.getVectorNumElements(), 0);
for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Op.getOperand(Idx);
- if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
+ if (!In.isUndef() && (In->getAsConstantVal() & 0x1))
Imm.setBit(Idx);
}
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
@@ -53258,7 +53258,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
if (Index.getOpcode() == ISD::ADD &&
Index.getValueType().getVectorElementType() == PtrVT &&
isa<ConstantSDNode>(Scale)) {
- uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleAmt = Scale->getAsConstantVal();
if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
BitVector UndefElts;
if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
diff --git a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 05003ec304adc5..cc95c3f5db343c 100644
--- a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -142,7 +142,7 @@ void XCoreDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
default: break;
case ISD::Constant: {
- uint64_t Val = cast<ConstantSDNode>(N)->getZExtValue();
+ uint64_t Val = N->getAsConstantVal();
if (immMskBitp(N)) {
// Transformation function: get the size of a mask
// Look for the first non-zero bit
From aee4d945100bbee598cb92b789d691196fdc1b0a Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Tue, 2 Jan 2024 14:20:22 +0000
Subject: [PATCH 2/2] Rename to getAsZExtVal
---
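For reference, a minimal sketch of the call-site pattern this series targets, assuming a node already known (e.g. via isa<ConstantSDNode>) to wrap a ConstantSDNode. The helper name getAsZExtVal comes from this patch; the enclosing function getShiftAmount and its argument are hypothetical stand-ins for the many call sites updated below, not code from the patch:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    // Hypothetical call site: Amt must wrap a ConstantSDNode, checked
    // beforehand with isa<ConstantSDNode>(Amt) as the real call sites do.
    static uint64_t getShiftAmount(SDValue Amt) {
      // Old spelling:            cast<ConstantSDNode>(Amt)->getZExtValue()
      // Interim (PATCH 1/2):     Amt->getAsConstantVal()
      return Amt->getAsZExtVal(); // spelling after this rename
    }

Spelling the zero-extension out in the name matches the new doc comment in SelectionDAGNodes.h and avoids implying the helper might return a sign-extended value.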
llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 6 +++--
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 +-
.../lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 4 ++--
.../SelectionDAG/LegalizeFloatTypes.cpp | 2 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 2 +-
.../SelectionDAG/LegalizeVectorTypes.cpp | 10 ++++-----
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 4 ++--
.../SelectionDAG/SelectionDAGBuilder.cpp | 22 +++++++++----------
.../SelectionDAG/SelectionDAGDumper.cpp | 2 +-
.../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 10 ++++-----
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 6 ++---
.../Target/AArch64/AArch64ISelLowering.cpp | 10 ++++-----
.../AArch64/AArch64SelectionDAGInfo.cpp | 2 +-
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 8 +++----
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 ++---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 4 ++--
llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 22 +++++++++----------
llvm/lib/Target/ARM/ARMISelLowering.cpp | 16 +++++++-------
llvm/lib/Target/AVR/AVRISelLowering.cpp | 4 ++--
.../Target/Hexagon/HexagonISelDAGToDAG.cpp | 2 +-
.../Target/Hexagon/HexagonISelLoweringHVX.cpp | 6 ++---
.../LoongArch/LoongArchISelLowering.cpp | 18 +++++++--------
llvm/lib/Target/M68k/M68kISelLowering.cpp | 4 ++--
llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp | 4 ++--
llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 2 +-
llvm/lib/Target/Mips/MipsISelLowering.cpp | 2 +-
llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 8 +++----
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 2 +-
llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 14 ++++++------
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 16 +++++++-------
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 14 ++++++------
.../Target/SystemZ/SystemZISelDAGToDAG.cpp | 6 ++---
.../Target/SystemZ/SystemZISelLowering.cpp | 18 +++++++--------
.../WebAssembly/WebAssemblyISelLowering.cpp | 6 ++---
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 20 ++++++++---------
llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp | 2 +-
40 files changed, 148 insertions(+), 146 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index d13badd83f5493..900ebf94c939de 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -923,7 +923,9 @@ END_TWO_BYTE_PACK()
/// Helper method returns the integer value of a ConstantSDNode operand.
inline uint64_t getConstantOperandVal(unsigned Num) const;
- inline uint64_t getAsConstantVal() const;
+
+ /// Helper method returns the zero-extended integer value of a ConstantSDNode.
+ inline uint64_t getAsZExtVal() const;
/// Helper method returns the APInt of a ConstantSDNode operand.
inline const APInt &getConstantOperandAPInt(unsigned Num) const;
@@ -1641,7 +1643,7 @@ uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
}
-uint64_t SDNode::getAsConstantVal() const {
+uint64_t SDNode::getAsZExtVal() const {
return cast<ConstantSDNode>(this)->getZExtValue();
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 9a41531ce9fba6..50917588e8efd2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14709,7 +14709,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
- int Elt = EltNo->getAsConstantVal();
+ int Elt = EltNo->getAsZExtVal();
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDLoc DL(N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 248f26ce5cecb1..032cff416cda93 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -551,7 +551,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
SDValue N2 = Node->getOperand(2);
- unsigned SubIdx = N2->getAsConstantVal();
+ unsigned SubIdx = N2->getAsZExtVal();
// Figure out the register class to create for the destreg. It should be
// the largest legal register class supporting SubIdx sub-registers.
@@ -650,7 +650,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
// Skip physical registers as they don't have a vreg to get and we'll
// insert copies for them in TwoAddressInstructionPass anyway.
if (!R || !R->getReg().isPhysical()) {
- unsigned SubIdx = Op->getAsConstantVal();
+ unsigned SubIdx = Op->getAsZExtVal();
unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
const TargetRegisterClass *SRC =
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 00522535daca1f..c3c3fafe327474 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -2490,7 +2490,7 @@ SDValue DAGTypeLegalizer::PromoteFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
EVT VecVT = Vec->getValueType(0);
EVT EltVT = VecVT.getVectorElementType();
- uint64_t IdxVal = Idx->getAsConstantVal();
+ uint64_t IdxVal = Idx->getAsZExtVal();
switch (getTypeAction(VecVT)) {
default: break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 5a848fce42b689..0211a108b5b9f0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -5557,7 +5557,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
getTypeAction(InVT) == TargetLowering::TypeLegal) {
EVT NInVT = InVT.getHalfNumVectorElementsVT(*DAG.getContext());
unsigned NElts = NInVT.getVectorMinNumElements();
- uint64_t IdxVal = BaseIdx->getAsConstantVal();
+ uint64_t IdxVal = BaseIdx->getAsZExtVal();
SDValue Step1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NInVT, InOp0,
DAG.getConstant(alignDown(IdxVal, NElts), dl,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 475f6fba60ec9a..ec74d2940099f9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1442,7 +1442,7 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
- uint64_t IdxVal = Idx->getAsConstantVal();
+ uint64_t IdxVal = Idx->getAsZExtVal();
Hi = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
DAG.getVectorIdxConstant(IdxVal + LoVT.getVectorMinNumElements(), dl));
@@ -1466,7 +1466,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
// If we know the index is in the first half, and we know the subvector
// doesn't cross the boundary between the halves, we can avoid spilling the
// vector, and insert into the lower half of the split vector directly.
- unsigned IdxVal = Idx->getAsConstantVal();
+ unsigned IdxVal = Idx->getAsZExtVal();
if (IdxVal + SubElems <= LoElems) {
Lo = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, LoVT, Lo, SubVec, Idx);
return;
@@ -3279,7 +3279,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_INSERT_SUBVECTOR(SDNode *N,
SDValue Lo, Hi;
GetSplitVector(SubVec, Lo, Hi);
- uint64_t IdxVal = Idx->getAsConstantVal();
+ uint64_t IdxVal = Idx->getAsZExtVal();
uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
SDValue FirstInsertion =
@@ -3301,7 +3301,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
GetSplitVector(N->getOperand(0), Lo, Hi);
uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements();
- uint64_t IdxVal = Idx->getAsConstantVal();
+ uint64_t IdxVal = Idx->getAsZExtVal();
if (IdxVal < LoEltsMin) {
assert(IdxVal + SubVT.getVectorMinNumElements() <= LoEltsMin &&
@@ -5257,7 +5257,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
EVT InVT = InOp.getValueType();
// Check if we can just return the input vector after widening.
- uint64_t IdxVal = Idx->getAsConstantVal();
+ uint64_t IdxVal = Idx->getAsZExtVal();
if (IdxVal == 0 && InVT == WidenVT)
return InOp;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 91a0caad55b19e..f13a5e607852da 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7193,7 +7193,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
assert(isa<ConstantSDNode>(N3) &&
"Insert subvector index must be constant");
assert((VT.isScalableVector() != N2VT.isScalableVector() ||
- (N2VT.getVectorMinNumElements() + N3->getAsConstantVal()) <=
+ (N2VT.getVectorMinNumElements() + N3->getAsZExtVal()) <=
VT.getVectorMinNumElements()) &&
"Insert subvector overflow!");
assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() ==
@@ -9982,7 +9982,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
Ops[1].getValueType().isFloatingPoint() &&
VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
isa<ConstantSDNode>(Ops[2]) &&
- (Ops[2]->getAsConstantVal() == 0 || Ops[2]->getAsConstantVal() == 1) &&
+ (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
"Invalid STRICT_FP_ROUND!");
break;
#if 0
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ca37b3da065d1c..ceb8c0b47d9304 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5642,7 +5642,7 @@ static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
// expansion/promotion) if it was possible to expand a libcall of an
// illegal type during operation legalization. But it's not, so things
// get a bit hacky.
- unsigned ScaleInt = Scale->getAsConstantVal();
+ unsigned ScaleInt = Scale->getAsZExtVal();
if ((ScaleInt > 0 || (Saturating && Signed)) &&
(TLI.isTypeLegal(VT) ||
(VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
@@ -7655,7 +7655,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// suitable for the target. Convert the index as required.
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
if (Index.getValueType() != VectorIdxTy)
- Index = DAG.getVectorIdxConstant(Index->getAsConstantVal(), sdl);
+ Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
@@ -7671,7 +7671,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// suitable for the target. Convert the index as required.
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
if (Index.getValueType() != VectorIdxTy)
- Index = DAG.getVectorIdxConstant(Index->getAsConstantVal(), sdl);
+ Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
setValue(&I,
DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
@@ -8134,7 +8134,7 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
case ISD::VP_IS_FPCLASS: {
const DataLayout DLayout = DAG.getDataLayout();
EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
- auto Constant = OpValues[1]->getAsConstantVal();
+ auto Constant = OpValues[1]->getAsZExtVal();
SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
{OpValues[0], Check, OpValues[2], OpValues[3]});
@@ -9171,7 +9171,7 @@ findMatchingInlineAsmOperand(unsigned OperandNo,
unsigned CurOp = InlineAsm::Op_FirstOperand;
for (; OperandNo; --OperandNo) {
// Advance to the next operand.
- unsigned OpFlag = AsmNodeOperands[CurOp]->getAsConstantVal();
+ unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
const InlineAsm::Flag F(OpFlag);
assert(
(F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
@@ -9477,7 +9477,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
// just use its register.
auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
AsmNodeOperands);
- InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsConstantVal());
+ InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
if (OpInfo.isIndirect) {
// This happens on gcc/testsuite/gcc.dg/pr8788-1.c
@@ -9982,13 +9982,13 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
SDValue ID = getValue(CI.getArgOperand(0));
assert(ID.getValueType() == MVT::i64);
SDValue IDConst =
- DAG.getTargetConstant(ID->getAsConstantVal(), DL, ID.getValueType());
+ DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
Ops.push_back(IDConst);
SDValue Shad = getValue(CI.getArgOperand(1));
assert(Shad.getValueType() == MVT::i32);
SDValue ShadConst =
- DAG.getTargetConstant(Shad->getAsConstantVal(), DL, Shad.getValueType());
+ DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
Ops.push_back(ShadConst);
// Add the live variables.
@@ -10037,7 +10037,7 @@ void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
// Get the real number of arguments participating in the call <numArgs>
SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
- unsigned NumArgs = NArgVal->getAsConstantVal();
+ unsigned NumArgs = NArgVal->getAsZExtVal();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
// Intrinsics include all meta-operands up to but not including CC.
@@ -10084,10 +10084,10 @@ void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
// Add the <id> and <numBytes> constants.
SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
- Ops.push_back(DAG.getTargetConstant(IDVal->getAsConstantVal(), dl, MVT::i64));
+ Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
Ops.push_back(
- DAG.getTargetConstant(NBytesVal->getAsConstantVal(), dl, MVT::i32));
+ DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
// Add the callee.
Ops.push_back(Callee);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 93fe7f49c6aee3..09b2b63ab9d60b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -149,7 +149,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
- unsigned IID = getOperand(OpNo)->getAsConstantVal();
+ unsigned IID = getOperand(OpNo)->getAsZExtVal();
if (IID < Intrinsic::num_intrinsics)
return Intrinsic::getBaseName((Intrinsic::ID)IID).str();
if (!G)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 72b03e00fd90ae..363e94cc0fcc75 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2121,7 +2121,7 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
--e; // Don't process a glue operand if it is here.
while (i != e) {
- InlineAsm::Flag Flags(InOps[i]->getAsConstantVal());
+ InlineAsm::Flag Flags(InOps[i]->getAsZExtVal());
if (!Flags.isMemKind() && !Flags.isFuncKind()) {
// Just skip over this operand, copying the operands verbatim.
Ops.insert(Ops.end(), InOps.begin() + i,
@@ -2135,10 +2135,10 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
if (Flags.isUseOperandTiedToDef(TiedToOperand)) {
// We need the constraint ID from the operand this is tied to.
unsigned CurOp = InlineAsm::Op_FirstOperand;
- Flags = InlineAsm::Flag(InOps[CurOp]->getAsConstantVal());
+ Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
for (; TiedToOperand; --TiedToOperand) {
CurOp += Flags.getNumOperandRegisters() + 1;
- Flags = InlineAsm::Flag(InOps[CurOp]->getAsConstantVal());
+ Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
}
}
@@ -2378,7 +2378,7 @@ void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
if (OpNode->getOpcode() == ISD::Constant) {
Ops.push_back(
CurDAG->getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
- Ops.push_back(CurDAG->getTargetConstant(OpNode->getAsConstantVal(), DL,
+ Ops.push_back(CurDAG->getTargetConstant(OpNode->getAsZExtVal(), DL,
OpVal.getValueType()));
} else {
Ops.push_back(OpVal);
@@ -2449,7 +2449,7 @@ void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
Ops.push_back(*It++);
// Push the args for the call.
- for (uint64_t I = NumArgs->getAsConstantVal(); I != 0; I--)
+ for (uint64_t I = NumArgs->getAsZExtVal(); I != 0; I--)
Ops.push_back(*It++);
// Now push the live variables.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index e28258b7025316..edc8cc7d4d1e69 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -580,7 +580,7 @@ bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
if (!isa<ConstantSDNode>(N.getNode()))
return false;
- uint64_t Immed = N.getNode()->getAsConstantVal();
+ uint64_t Immed = N.getNode()->getAsZExtVal();
unsigned ShiftAmt;
if (Immed >> 12 == 0) {
@@ -611,7 +611,7 @@ bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
return false;
// The immediate operand must be a 24-bit zero-extended immediate.
- uint64_t Immed = N.getNode()->getAsConstantVal();
+ uint64_t Immed = N.getNode()->getAsZExtVal();
// This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
// have the opposite effect on the C flag, so this pattern mustn't match under
@@ -1326,7 +1326,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
// MOV X0, WideImmediate
// LDR X2, [BaseReg, X0]
if (isa<ConstantSDNode>(RHS)) {
- int64_t ImmOff = (int64_t)RHS->getAsConstantVal();
+ int64_t ImmOff = (int64_t)RHS->getAsZExtVal();
// Skip the immediate can be selected by load/store addressing mode.
// Also skip the immediate can be encoded by a single ADD (SUB is also
// checked by using -ImmOff).
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 37d0ef90554e1d..94d604ed1fbbea 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3589,7 +3589,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// can be turned into:
// cmp w12, w11, lsl #1
if (!isa<ConstantSDNode>(RHS) ||
- !isLegalArithImmed(RHS->getAsConstantVal())) {
+ !isLegalArithImmed(RHS->getAsZExtVal())) {
SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
@@ -3623,7 +3623,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
LHS.getNode()->hasNUsesOfValue(1, 0)) {
- int16_t ValueofRHS = RHS->getAsConstantVal();
+ int16_t ValueofRHS = RHS->getAsZExtVal();
if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
SDValue SExt =
DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
@@ -5619,7 +5619,7 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
// SVE supports an index scaled by sizeof(MemVT.elt) only, everything else
// must be calculated before hand.
- uint64_t ScaleVal = Scale->getAsConstantVal();
+ uint64_t ScaleVal = Scale->getAsZExtVal();
if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
EVT IndexVT = Index.getValueType();
@@ -5707,7 +5707,7 @@ SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
// SVE supports an index scaled by sizeof(MemVT.elt) only, everything else
// must be calculated before hand.
- uint64_t ScaleVal = Scale->getAsConstantVal();
+ uint64_t ScaleVal = Scale->getAsZExtVal();
if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
EVT IndexVT = Index.getValueType();
@@ -22011,7 +22011,7 @@ static SDValue performBRCONDCombine(SDNode *N,
SDValue Cmp = N->getOperand(3);
assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
- unsigned CC = CCVal->getAsConstantVal();
+ unsigned CC = CCVal->getAsZExtVal();
if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
return SDValue();
diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index 3415b533047d1a..9e43f206efcf78 100644
--- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -172,7 +172,7 @@ static SDValue EmitUnrolledSetTag(SelectionDAG &DAG, const SDLoc &dl,
SDValue AArch64SelectionDAGInfo::EmitTargetCodeForSetTag(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr,
SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const {
- uint64_t ObjSize = Size->getAsConstantVal();
+ uint64_t ObjSize = Size->getAsZExtVal();
assert(ObjSize % 16 == 0);
MachineFunction &MF = DAG.getMachineFunction();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 3a78024dd79c2d..7d4e522953aea5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -382,7 +382,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
Subtarget->getRegisterInfo()->getRegClass(RCID);
SDValue SubRegOp = N->getOperand(OpNo + 1);
- unsigned SubRegIdx = SubRegOp->getAsConstantVal();
+ unsigned SubRegIdx = SubRegOp->getAsZExtVal();
return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
SubRegIdx);
}
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 6b43e47dc8a378..674fd04f2fc1c7 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1651,7 +1651,7 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = Swz[i]->getAsConstantVal();
+ unsigned Idx = Swz[i]->getAsZExtVal();
if (SwizzleRemap.contains(Idx))
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
@@ -1659,7 +1659,7 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
SwizzleRemap.clear();
BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = Swz[i]->getAsConstantVal();
+ unsigned Idx = Swz[i]->getAsZExtVal();
if (SwizzleRemap.contains(Idx))
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
@@ -1780,7 +1780,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
// Check that we know which element is being inserted
if (!isa<ConstantSDNode>(EltNo))
return SDValue();
- unsigned Elt = EltNo->getAsConstantVal();
+ unsigned Elt = EltNo->getAsZExtVal();
// Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
@@ -2021,7 +2021,7 @@ bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx,
}
case R600::MOV_IMM_GLOBAL_ADDR:
// Check if the Imm slot is used. Taken from below.
- if (Imm->getAsConstantVal())
+ if (Imm->getAsZExtVal())
return false;
Imm = Src.getOperand(0);
Src = DAG.getRegister(R600::ALU_LITERAL_X, MVT::i32);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4c16229e517912..10f44720a16210 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6424,7 +6424,7 @@ SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
EVT InsVT = Ins.getValueType();
EVT EltVT = VecVT.getVectorElementType();
unsigned InsNumElts = InsVT.getVectorNumElements();
- unsigned IdxVal = Idx->getAsConstantVal();
+ unsigned IdxVal = Idx->getAsZExtVal();
SDLoc SL(Op);
if (EltVT.getScalarSizeInBits() == 16 && IdxVal % 2 == 0) {
@@ -7453,7 +7453,7 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
Ops.push_back(IsA16 ? True : False);
if (!Subtarget->hasGFX90AInsts()) {
Ops.push_back(TFE); //tfe
- } else if (TFE->getAsConstantVal()) {
+ } else if (TFE->getAsZExtVal()) {
report_fatal_error("TFE is not supported on this GPU");
}
if (!IsGFX12Plus || BaseOpcode->Sampler || BaseOpcode->MSAA)
@@ -7590,7 +7590,7 @@ SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
setBufferOffsets(Offset, DAG, &Ops[3],
NumLoads > 1 ? Align(16 * NumLoads) : Align(4));
- uint64_t InstOffset = Ops[5]->getAsConstantVal();
+ uint64_t InstOffset = Ops[5]->getAsZExtVal();
for (unsigned i = 0; i < NumLoads; ++i) {
Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 4a8b80c2852bf7..196dbdf158600e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -338,8 +338,8 @@ bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
return false;
- Offset0 = Off0->getAsConstantVal();
- Offset1 = Off1->getAsConstantVal();
+ Offset0 = Off0->getAsZExtVal();
+ Offset1 = Off1->getAsZExtVal();
return true;
}
diff --git a/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp b/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
index aec01c002ea291..17c2d7bb13b47e 100644
--- a/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
@@ -170,7 +170,7 @@ bool ARCDAGToDAGISel::SelectFrameADDR_ri(SDValue Addr, SDValue &Base,
void ARCDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
case ISD::Constant: {
- uint64_t CVal = N->getAsConstantVal();
+ uint64_t CVal = N->getAsZExtVal();
ReplaceNode(N, CurDAG->getMachineNode(
isInt<12>(CVal) ? ARC::MOV_rs12 : ARC::MOV_rlimm,
SDLoc(N), MVT::i32,
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 29dad62d197034..fad850c9eea71d 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -372,7 +372,7 @@ INITIALIZE_PASS(ARMDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = N->getAsConstantVal();
+ Imm = N->getAsZExtVal();
return true;
}
return false;
@@ -1102,7 +1102,7 @@ bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
Offset = N.getOperand(0);
SDValue N1 = N.getOperand(1);
Label =
- CurDAG->getTargetConstant(N1->getAsConstantVal(), SDLoc(N), MVT::i32);
+ CurDAG->getTargetConstant(N1->getAsZExtVal(), SDLoc(N), MVT::i32);
return true;
}
@@ -1942,7 +1942,7 @@ SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
if (!is64BitVector && NumVecs < 3)
NumRegs *= 2;
- unsigned Alignment = Align->getAsConstantVal();
+ unsigned Alignment = Align->getAsZExtVal();
if (Alignment >= 32 && NumRegs == 4)
Alignment = 32;
else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
@@ -2428,7 +2428,7 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
unsigned Alignment = 0;
if (NumVecs != 3) {
- Alignment = Align->getAsConstantVal();
+ Alignment = Align->getAsZExtVal();
unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
@@ -2871,7 +2871,7 @@ void ARMDAGToDAGISel::SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
Ops.push_back(N->getOperand(OpIdx++)); // limit
SDValue ImmOp = N->getOperand(OpIdx++); // step
- int ImmValue = ImmOp->getAsConstantVal();
+ int ImmValue = ImmOp->getAsZExtVal();
Ops.push_back(getI32Imm(ImmValue, Loc));
if (Predicated)
@@ -2892,7 +2892,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
// Convert and append the immediate operand designating the coprocessor.
SDValue ImmCorpoc = N->getOperand(OpIdx++);
- uint32_t ImmCoprocVal = ImmCorpoc->getAsConstantVal();
+ uint32_t ImmCoprocVal = ImmCorpoc->getAsZExtVal();
Ops.push_back(getI32Imm(ImmCoprocVal, Loc));
// For accumulating variants copy the low and high order parts of the
@@ -2911,7 +2911,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
// Convert and append the immediate operand
SDValue Imm = N->getOperand(OpIdx);
- uint32_t ImmVal = Imm->getAsConstantVal();
+ uint32_t ImmVal = Imm->getAsZExtVal();
Ops.push_back(getI32Imm(ImmVal, Loc));
// Accumulating variants are IT-predicable, add predicate operands.
@@ -2965,7 +2965,7 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
unsigned Alignment = 0;
if (NumVecs != 3) {
- Alignment = Align->getAsConstantVal();
+ Alignment = Align->getAsZExtVal();
unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
@@ -3697,7 +3697,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
// Other cases are autogenerated.
break;
case ISD::Constant: {
- unsigned Val = N->getAsConstantVal();
+ unsigned Val = N->getAsZExtVal();
// If we can't materialize the constant we need to use a literal pool
if (ConstantMaterializationCost(Val, Subtarget) > 2 &&
!Subtarget->genExecuteOnly()) {
@@ -4132,7 +4132,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
assert(N2.getOpcode() == ISD::Constant);
assert(N3.getOpcode() == ISD::Register);
- unsigned CC = (unsigned)N2->getAsConstantVal();
+ unsigned CC = (unsigned)N2->getAsZExtVal();
if (InGlue.getOpcode() == ARMISD::CMPZ) {
if (InGlue.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
@@ -4243,7 +4243,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
if (SwitchEQNEToPLMI) {
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal();
switch (CC) {
default: llvm_unreachable("CMPZ must be either NE or EQ!");
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index cce000eeef15c3..e746edaa82957c 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -4820,7 +4820,7 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// some tweaks to the heuristics for the previous and->shift transform.
// FIXME: Optimize cases where the LHS isn't a shift.
if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(RHS) && RHS->getAsConstantVal() == 0x80000000U &&
+ isa<ConstantSDNode>(RHS) && RHS->getAsZExtVal() == 0x80000000U &&
CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
LHS.getConstantOperandVal(1) < 31) {
unsigned ShiftAmt = LHS.getConstantOperandVal(1) + 1;
@@ -5532,7 +5532,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
// Choose GE over PL, which vsel does now support
- if (ARMcc->getAsConstantVal() == ARMCC::PL)
+ if (ARMcc->getAsZExtVal() == ARMCC::PL)
ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
}
@@ -7748,7 +7748,7 @@ static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
uint64_t Val;
if (!isa<ConstantSDNode>(N))
return SDValue();
- Val = N->getAsConstantVal();
+ Val = N->getAsZExtVal();
if (ST->isThumb1Only()) {
if (Val <= 255 || ~Val <= 255)
@@ -7803,7 +7803,7 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
SDValue V = Op.getOperand(i);
if (!isa<ConstantSDNode>(V) && !V.isUndef())
continue;
- bool BitSet = V.isUndef() ? false : V->getAsConstantVal();
+ bool BitSet = V.isUndef() ? false : V->getAsZExtVal();
if (BitSet)
Bits32 |= BoolMask << (i * BitsPerBool);
}
@@ -9239,7 +9239,7 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
EVT VT = Op.getValueType();
EVT Op1VT = V1.getValueType();
unsigned NumElts = VT.getVectorNumElements();
- unsigned Index = V2->getAsConstantVal();
+ unsigned Index = V2->getAsZExtVal();
assert(VT.getScalarSizeInBits() == 1 &&
"Unexpected custom EXTRACT_SUBVECTOR lowering");
@@ -14617,7 +14617,7 @@ static SDValue PerformORCombineToBFI(SDNode *N,
// Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
// where lsb(mask) == #shamt and masked bits of B are known zero.
SDValue ShAmt = N00.getOperand(1);
- unsigned ShAmtC = ShAmt->getAsConstantVal();
+ unsigned ShAmtC = ShAmt->getAsZExtVal();
unsigned LSB = llvm::countr_zero(Mask);
if (ShAmtC != LSB)
return SDValue();
@@ -18338,7 +18338,7 @@ ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
SDValue Chain = N->getOperand(0);
SDValue BB = N->getOperand(1);
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal();
// (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
// -> (brcond Chain BB CC CPSR Cmp)
@@ -18371,7 +18371,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
SDValue FalseVal = N->getOperand(0);
SDValue TrueVal = N->getOperand(1);
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal();
// BFI is only available on V6T2+.
if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index be0cec36eebdf3..f91e77adb8f810 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -660,7 +660,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
SDValue Cmp;
if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
- uint64_t Imm = RHS->getAsConstantVal();
+ uint64_t Imm = RHS->getAsZExtVal();
// Generate a CPI/CPC pair if RHS is a 16-bit constant. Use the zero
// register for the constant RHS if its lower or higher byte is zero.
SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
@@ -680,7 +680,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
} else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
// Generate a CPI/CPC pair if LHS is a 16-bit constant. Use the zero
// register for the constant LHS if its lower or higher byte is zero.
- uint64_t Imm = LHS->getAsConstantVal();
+ uint64_t Imm = LHS->getAsZExtVal();
SDValue LHSlo = (Imm & 0xff) == 0
? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
: DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index eeb18056709a34..defb1f7324f4e2 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -743,7 +743,7 @@ void HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
//
void HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
if (N->getValueType(0) == MVT::i1) {
- assert(!(N->getAsConstantVal() >> 1));
+ assert(!(N->getAsZExtVal() >> 1));
unsigned Opc = (cast<ConstantSDNode>(N)->getSExtValue() != 0)
? Hexagon::PS_true
: Hexagon::PS_false;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index d84c0939a66bfc..81035849491bcb 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -1256,7 +1256,7 @@ HexagonTargetLowering::extractHvxSubvectorReg(SDValue OrigOp, SDValue VecV,
SDValue IdxV, const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const {
MVT VecTy = ty(VecV);
unsigned HwLen = Subtarget.getVectorLength();
- unsigned Idx = IdxV.getNode()->getAsConstantVal();
+ unsigned Idx = IdxV.getNode()->getAsZExtVal();
MVT ElemTy = VecTy.getVectorElementType();
unsigned ElemWidth = ElemTy.getSizeInBits();
@@ -1299,7 +1299,7 @@ HexagonTargetLowering::extractHvxSubvectorPred(SDValue VecV, SDValue IdxV,
MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);
// IdxV is required to be a constant.
- unsigned Idx = IdxV.getNode()->getAsConstantVal();
+ unsigned Idx = IdxV.getNode()->getAsZExtVal();
unsigned ResLen = ResTy.getVectorNumElements();
unsigned BitBytes = HwLen / VecTy.getVectorNumElements();
@@ -1801,7 +1801,7 @@ HexagonTargetLowering::LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG)
MVT SrcTy = ty(SrcV);
MVT DstTy = ty(Op);
SDValue IdxV = Op.getOperand(1);
- unsigned Idx = IdxV.getNode()->getAsConstantVal();
+ unsigned Idx = IdxV.getNode()->getAsZExtVal();
assert(Idx % DstTy.getVectorNumElements() == 0);
(void)Idx;
const SDLoc &dl(Op);
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index a776511fb9f493..eaa02c66c2429c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -525,7 +525,7 @@ LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
if (isa<ConstantSDNode>(Idx) &&
(EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
- EltTy == MVT::f64 || Idx->getAsConstantVal() < NumElts / 2))
+ EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))
return Op;
return SDValue();
@@ -1382,28 +1382,28 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA32, DAG);
// call void @llvm.loongarch.cacop.[d/w](uimm5, rj, simm12)
- unsigned Imm1 = Op2->getAsConstantVal();
+ unsigned Imm1 = Op2->getAsZExtVal();
int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
return Op;
}
case Intrinsic::loongarch_dbar: {
- unsigned Imm = Op2->getAsConstantVal();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::DBAR, DL, MVT::Other, Chain,
DAG.getConstant(Imm, DL, GRLenVT));
}
case Intrinsic::loongarch_ibar: {
- unsigned Imm = Op2->getAsConstantVal();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::IBAR, DL, MVT::Other, Chain,
DAG.getConstant(Imm, DL, GRLenVT));
}
case Intrinsic::loongarch_break: {
- unsigned Imm = Op2->getAsConstantVal();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::BREAK, DL, MVT::Other, Chain,
@@ -1412,7 +1412,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
case Intrinsic::loongarch_movgr2fcsr: {
if (!Subtarget.hasBasicF())
return emitIntrinsicErrorMessage(Op, ErrorMsgReqF, DAG);
- unsigned Imm = Op2->getAsConstantVal();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<2>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::MOVGR2FCSR, DL, MVT::Other, Chain,
@@ -1421,7 +1421,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
Op.getOperand(3)));
}
case Intrinsic::loongarch_syscall: {
- unsigned Imm = Op2->getAsConstantVal();
+ unsigned Imm = Op2->getAsZExtVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::SYSCALL, DL, MVT::Other, Chain,
@@ -1924,7 +1924,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqF);
return;
}
- unsigned Imm = Op2->getAsConstantVal();
+ unsigned Imm = Op2->getAsZExtVal();
if (!isUInt<2>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
@@ -1980,7 +1980,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
CSR_CASE(iocsrrd_d);
#undef CSR_CASE
case Intrinsic::loongarch_csrrd_w: {
- unsigned Imm = Op2->getAsConstantVal();
+ unsigned Imm = Op2->getAsZExtVal();
if (!isUInt<14>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index b26ed5c4749de6..158393f02a2470 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -2375,7 +2375,7 @@ SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == M68kISD::SUB) {
- unsigned CondCode = CC->getAsConstantVal();
+ unsigned CondCode = CC->getAsZExtVal();
if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
@@ -2491,7 +2491,7 @@ SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
Cond = Cmp;
AddTest = false;
} else {
- switch (CC->getAsConstantVal()) {
+ switch (CC->getAsZExtVal()) {
default:
break;
case M68k::COND_VS:
diff --git a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index b4cee26cc09b33..efb23b1a4e3f58 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -308,12 +308,12 @@ static bool isValidIndexedLoad(const LoadSDNode *LD) {
switch (VT.getSimpleVT().SimpleTy) {
case MVT::i8:
- if (LD->getOffset()->getAsConstantVal() != 1)
+ if (LD->getOffset()->getAsZExtVal() != 1)
return false;
break;
case MVT::i16:
- if (LD->getOffset()->getAsConstantVal() != 2)
+ if (LD->getOffset()->getAsZExtVal() != 2)
return false;
break;
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 3174d613cc38bf..1db31f8974254b 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -1168,7 +1168,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
bool Invert = false;
bool Shift = false;
bool Convert = true;
- switch (TargetCC->getAsConstantVal()) {
+ switch (TargetCC->getAsZExtVal()) {
default:
Convert = false;
break;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 301eb3169ef2f9..d431d3d91494f6 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2042,7 +2042,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
return Op;
SDValue CCNode = CondRes.getOperand(2);
- Mips::CondCode CC = (Mips::CondCode)CCNode->getAsConstantVal();
+ Mips::CondCode CC = (Mips::CondCode)CCNode->getAsZExtVal();
unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 7b386b024803a8..c0e978018919e0 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -76,7 +76,7 @@ void MipsSEDAGToDAGISel::addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
}
unsigned MipsSEDAGToDAGISel::getMSACtrlReg(const SDValue RegIdx) const {
- uint64_t RegNum = RegIdx->getAsConstantVal();
+ uint64_t RegNum = RegIdx->getAsZExtVal();
return Mips::MSACtrlRegClass.getRegister(RegNum);
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 2c9966696ed156..7abe984b34e197 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -2076,7 +2076,7 @@ bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) {
VTs = CurDAG->getVTList(EVTs);
}
- unsigned OffsetVal = Offset->getAsConstantVal();
+ unsigned OffsetVal = Offset->getAsZExtVal();
SmallVector<SDValue, 2> Ops;
Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
@@ -2091,7 +2091,7 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Offset = N->getOperand(1);
- unsigned OffsetVal = Offset->getAsConstantVal();
+ unsigned OffsetVal = Offset->getAsZExtVal();
MemSDNode *Mem = cast<MemSDNode>(N);
// How many elements do we have?
@@ -2158,9 +2158,9 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Param = N->getOperand(1);
- unsigned ParamVal = Param->getAsConstantVal();
+ unsigned ParamVal = Param->getAsZExtVal();
SDValue Offset = N->getOperand(2);
- unsigned OffsetVal = Offset->getAsConstantVal();
+ unsigned OffsetVal = Offset->getAsZExtVal();
MemSDNode *Mem = cast<MemSDNode>(N);
SDValue Glue = N->getOperand(N->getNumOperands() - 1);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 693e75bdd6ff1c..5d6d08d10def16 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5811,7 +5811,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SDLoc DL(N);
// Get the intrinsic ID
- unsigned IntrinNo = Intrin.getNode()->getAsConstantVal();
+ unsigned IntrinNo = Intrin.getNode()->getAsZExtVal();
switch (IntrinNo) {
default:
return;
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 35b0f0a706480d..a554d774ef8ee4 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -565,7 +565,7 @@ static bool hasTocDataAttr(SDValue Val, unsigned PointerSize) {
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = N->getAsConstantVal();
+ Imm = N->getAsZExtVal();
return true;
}
return false;
@@ -575,7 +575,7 @@ static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
/// operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
- Imm = N->getAsConstantVal();
+ Imm = N->getAsZExtVal();
return true;
}
return false;
@@ -1500,7 +1500,7 @@ static SDNode *selectI64Imm(SelectionDAG *CurDAG, SDNode *N) {
SDLoc dl(N);
// Get 64 bit value.
- int64_t Imm = N->getAsConstantVal();
+ int64_t Imm = N->getAsZExtVal();
if (unsigned MinSize = allUsesTruncate(CurDAG, N)) {
uint64_t SextImm = SignExtend64(Imm, MinSize);
SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
@@ -4923,7 +4923,7 @@ bool PPCDAGToDAGISel::trySelectLoopCountIntrinsic(SDNode *N) {
SDNode *NewDecrement = CurDAG->getMachineNode(DecrementOpcode, DecrementLoc,
MVT::i1, DecrementOps);
- unsigned Val = RHS->getAsConstantVal();
+ unsigned Val = RHS->getAsZExtVal();
bool IsBranchOnTrue = (CC == ISD::SETEQ && Val) || (CC == ISD::SETNE && !Val);
unsigned Opcode = IsBranchOnTrue ? PPC::BC : PPC::BCn;
@@ -5765,7 +5765,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
break;
// If the multiplier fits int16, we can handle it with mulli.
- int64_t Imm = Op1->getAsConstantVal();
+ int64_t Imm = Op1->getAsZExtVal();
unsigned Shift = llvm::countr_zero<uint64_t>(Imm);
if (isInt<16>(Imm) || !Shift)
break;
@@ -6612,8 +6612,8 @@ void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
// For us to materialize these using one instruction, we must be able to
// represent them as signed 16-bit integers.
- uint64_t True = TrueRes->getAsConstantVal(),
- False = FalseRes->getAsConstantVal();
+ uint64_t True = TrueRes->getAsZExtVal(),
+ False = FalseRes->getAsZExtVal();
if (!isInt<16>(True) || !isInt<16>(False))
break;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 5c865dae018e17..235df1880b37c2 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2566,7 +2566,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
if (LeadingZero) {
if (!UniquedVals[Multiple-1].getNode())
return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
- int Val = UniquedVals[Multiple - 1]->getAsConstantVal();
+ int Val = UniquedVals[Multiple - 1]->getAsZExtVal();
if (Val < 16) // 0,0,0,4 -> vspltisw(4)
return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
}
@@ -2635,11 +2635,11 @@ bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (int16_t)N->getAsConstantVal();
+ Imm = (int16_t)N->getAsZExtVal();
if (N->getValueType(0) == MVT::i32)
- return Imm == (int32_t)N->getAsConstantVal();
+ return Imm == (int32_t)N->getAsZExtVal();
else
- return Imm == (int64_t)N->getAsConstantVal();
+ return Imm == (int64_t)N->getAsZExtVal();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
return isIntS16Immediate(Op.getNode(), Imm);
@@ -2684,7 +2684,7 @@ bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (int64_t)N->getAsConstantVal();
+ Imm = (int64_t)N->getAsZExtVal();
return isInt<34>(Imm);
}
bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
@@ -15580,7 +15580,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
NarrowOp.getOpcode() != ISD::ROTL && NarrowOp.getOpcode() != ISD::ROTR)
break;
- uint64_t Imm = Op2->getAsConstantVal();
+ uint64_t Imm = Op2->getAsZExtVal();
// Make sure that the constant is narrow enough to fit in the narrow type.
if (!isUInt<32>(Imm))
break;
@@ -16795,7 +16795,7 @@ void PPCTargetLowering::CollectTargetIntrinsicOperands(const CallInst &I,
return;
if (!isa<ConstantSDNode>(Ops[1].getNode()))
return;
- auto IntrinsicID = Ops[1].getNode()->getAsConstantVal();
+ auto IntrinsicID = Ops[1].getNode()->getAsZExtVal();
if (IntrinsicID != Intrinsic::ppc_tdw && IntrinsicID != Intrinsic::ppc_tw &&
IntrinsicID != Intrinsic::ppc_trapd && IntrinsicID != Intrinsic::ppc_trap)
return;
@@ -18430,7 +18430,7 @@ PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
if (Flags & PPC::MOF_RPlusSImm16) {
SDValue Op0 = N.getOperand(0);
SDValue Op1 = N.getOperand(1);
- int16_t Imm = Op1->getAsConstantVal();
+ int16_t Imm = Op1->getAsZExtVal();
if (!Align || isAligned(*Align, Imm)) {
Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
Base = Op0;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 76bcdf98e89b55..7cc665015b9f77 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3489,7 +3489,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (unsigned I = 0; I < NumElts;) {
SDValue V = Op.getOperand(I);
- bool BitValue = !V.isUndef() && V->getAsConstantVal();
+ bool BitValue = !V.isUndef() && V->getAsZExtVal();
Bits |= ((uint64_t)BitValue << BitPos);
++BitPos;
++I;
@@ -3620,7 +3620,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (const auto &OpIdx : enumerate(Op->op_values())) {
const auto &SeqV = OpIdx.value();
if (!SeqV.isUndef())
- SplatValue |= ((SeqV->getAsConstantVal() & EltMask)
+ SplatValue |= ((SeqV->getAsZExtVal() & EltMask)
<< (OpIdx.index() * EltBitSize));
}
@@ -3677,7 +3677,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (const auto &SeqV : Sequence) {
if (!SeqV.isUndef())
SplatValue |=
- ((SeqV->getAsConstantVal() & EltMask) << (EltIdx * EltBitSize));
+ ((SeqV->getAsZExtVal() & EltMask) << (EltIdx * EltBitSize));
EltIdx++;
}
@@ -3938,7 +3938,7 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
(isa<RegisterSDNode>(VL) &&
cast<RegisterSDNode>(VL)->getReg() == RISCV::X0))
NewVL = DAG.getRegister(RISCV::X0, MVT::i32);
- else if (isa<ConstantSDNode>(VL) && isUInt<4>(VL->getAsConstantVal()))
+ else if (isa<ConstantSDNode>(VL) && isUInt<4>(VL->getAsZExtVal()))
NewVL = DAG.getNode(ISD::ADD, DL, VL.getValueType(), VL, VL);
if (NewVL) {
@@ -7905,7 +7905,7 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
// Use tail agnostic policy if Idx is the last index of Vec.
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) &&
- Idx->getAsConstantVal() + 1 == VecVT.getVectorNumElements())
+ Idx->getAsZExtVal() + 1 == VecVT.getVectorNumElements())
Policy = RISCVII::TAIL_AGNOSTIC;
SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
Idx, Mask, InsertVL, Policy);
@@ -8165,7 +8165,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
const auto [MinVLMAX, MaxVLMAX] =
RISCVTargetLowering::computeVLMAXBounds(VT, Subtarget);
- uint64_t AVLInt = AVL->getAsConstantVal();
+ uint64_t AVLInt = AVL->getAsZExtVal();
if (AVLInt <= MinVLMAX) {
I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
} else if (AVLInt >= 2 * MaxVLMAX) {
@@ -8231,7 +8231,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
SDValue Mask = Operands[NumOps - 3];
SDValue MaskedOff = Operands[1];
// Assume Policy operand is the last operand.
- uint64_t Policy = Operands[NumOps - 1]->getAsConstantVal();
+ uint64_t Policy = Operands[NumOps - 1]->getAsZExtVal();
// We don't need to select maskedoff if it's undef.
if (MaskedOff.isUndef())
return Vec;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index c0767f6ae4cd78..320f91c7605782 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -1641,7 +1641,7 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
// If this is a 64-bit constant that is out of the range of LLILF,
// LLIHF and LGFI, split it into two 32-bit pieces.
if (Node->getValueType(0) == MVT::i64) {
- uint64_t Val = Node->getAsConstantVal();
+ uint64_t Val = Node->getAsZExtVal();
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
uint32_t(Val));
@@ -1677,8 +1677,8 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
SDValue CCValid = Node->getOperand(2);
SDValue CCMask = Node->getOperand(3);
- uint64_t ConstCCValid = CCValid.getNode()->getAsConstantVal();
- uint64_t ConstCCMask = CCMask.getNode()->getAsConstantVal();
+ uint64_t ConstCCValid = CCValid.getNode()->getAsZExtVal();
+ uint64_t ConstCCMask = CCMask.getNode()->getAsZExtVal();
// Invert the condition.
CCMask = CurDAG->getTargetConstant(ConstCCValid ^ ConstCCMask,
SDLoc(Node), CCMask.getValueType());
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 744951933ed879..2450c6801a6632 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2663,7 +2663,7 @@ static void adjustForFNeg(Comparison &C) {
static void adjustForLTGFR(Comparison &C) {
// Check for a comparison between (shl X, 32) and 0.
if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
- C.Op1.getOpcode() == ISD::Constant && C.Op1->getAsConstantVal() == 0) {
+ C.Op1.getOpcode() == ISD::Constant && C.Op1->getAsZExtVal() == 0) {
auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
if (C1 && C1->getZExtValue() == 32) {
SDValue ShlOp0 = C.Op0.getOperand(0);
@@ -2688,7 +2688,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
- C.Op1->getAsConstantVal() == 0) {
+ C.Op1->getAsZExtVal() == 0) {
auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
C.Op0.getValueSizeInBits().getFixedValue()) {
@@ -3033,12 +3033,12 @@ static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid,
- CmpOp1->getAsConstantVal(), Cond);
+ CmpOp1->getAsZExtVal(), Cond);
if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid,
- CmpOp1->getAsConstantVal(), Cond);
+ CmpOp1->getAsZExtVal(), Cond);
}
Comparison C(CmpOp0, CmpOp1, Chain);
C.CCMask = CCMaskForCondCode(Cond);
@@ -3459,7 +3459,7 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
C.CCMask != SystemZ::CCMASK_CMP_NE &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
- C.Op1->getAsConstantVal() == 0) {
+ C.Op1->getAsZExtVal() == 0) {
if (isAbsolute(C.Op0, TrueOp, FalseOp))
return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
if (isAbsolute(C.Op0, FalseOp, TrueOp))
@@ -3944,7 +3944,7 @@ SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
// If user has set the no alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal = (RealignOpt ? Align->getAsConstantVal() : 0);
+ uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
@@ -4009,7 +4009,7 @@ SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
// If user has set the no alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal = (RealignOpt ? Align->getAsConstantVal() : 0);
+ uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
@@ -4208,7 +4208,7 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// If the low part is a constant that is outside the range of LHI,
// then we're better off using IILF.
if (LowOp.getOpcode() == ISD::Constant) {
- int64_t Value = int32_t(LowOp->getAsConstantVal());
+ int64_t Value = int32_t(LowOp->getAsZExtVal());
if (!isInt<16>(Value))
return Op;
}
@@ -5892,7 +5892,7 @@ SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
Op1.getOpcode() != ISD::BITCAST &&
Op1.getOpcode() != ISD::ConstantFP &&
Op2.getOpcode() == ISD::Constant) {
- uint64_t Index = Op2->getAsConstantVal();
+ uint64_t Index = Op2->getAsZExtVal();
unsigned Mask = VT.getVectorNumElements() - 1;
if (Index <= Mask)
return Op;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 71c5430f1f9efa..7c47790d1e3515 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1869,7 +1869,7 @@ SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
Ops[OpIdx++] = Op.getOperand(2);
while (OpIdx < 18) {
const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
- if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsConstantVal() >= 32) {
+ if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) {
bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
} else {
@@ -1911,7 +1911,7 @@ WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
const SDNode *Index = Extract.getOperand(1).getNode();
if (!isa<ConstantSDNode>(Index))
return SDValue();
- unsigned IndexVal = Index->getAsConstantVal();
+ unsigned IndexVal = Index->getAsZExtVal();
unsigned Scale =
ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
assert(Scale > 1);
@@ -2334,7 +2334,7 @@ WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
if (isa<ConstantSDNode>(IdxNode)) {
// Ensure the index type is i32 to match the tablegen patterns
- uint64_t Idx = IdxNode->getAsConstantVal();
+ uint64_t Idx = IdxNode->getAsZExtVal();
SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
Ops[Op.getNumOperands() - 1] =
DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 5e8eb37af8bfab..9c9b6c7e9600f4 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2852,7 +2852,7 @@ bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
SDValue &Index, SDValue &Disp,
SDValue &Segment) {
X86ISelAddressMode AM;
- AM.Scale = ScaleOp->getAsConstantVal();
+ AM.Scale = ScaleOp->getAsZExtVal();
// Attempt to match index patterns, as long as we're not relying on implicit
// sign-extension, which is performed BEFORE scale.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 12afc563f80464..99e8f4309a18c0 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7371,7 +7371,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
/// index.
static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
SDValue ExtIdx) {
- int Idx = ExtIdx->getAsConstantVal();
+ int Idx = ExtIdx->getAsZExtVal();
if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
return Idx;
@@ -8793,7 +8793,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
- unsigned InsertC = InsIndex->getAsConstantVal();
+ unsigned InsertC = InsIndex->getAsZExtVal();
unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
if (InsertC < NumEltsInLow128Bits)
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
@@ -17747,7 +17747,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Vec), Idx));
- unsigned IdxVal = Idx->getAsConstantVal();
+ unsigned IdxVal = Idx->getAsZExtVal();
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
DAG.getTargetConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
@@ -24061,7 +24061,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == X86ISD::SUB) {
- unsigned CondCode = CC->getAsConstantVal();
+ unsigned CondCode = CC->getAsZExtVal();
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
@@ -25360,7 +25360,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
Src3.getValueType() != MVT::i8) {
Src3 =
- DAG.getTargetConstant(Src3->getAsConstantVal() & 0xff, dl, MVT::i8);
+ DAG.getTargetConstant(Src3->getAsZExtVal() & 0xff, dl, MVT::i8);
}
// We specify 2 possible opcodes for intrinsics with rounding modes.
@@ -25386,7 +25386,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue Src4 = Op.getOperand(4);
if (Src4.getValueType() != MVT::i8) {
Src4 =
- DAG.getTargetConstant(Src4->getAsConstantVal() & 0xff, dl, MVT::i8);
+ DAG.getTargetConstant(Src4->getAsZExtVal() & 0xff, dl, MVT::i8);
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
@@ -26788,7 +26788,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
{Chain, Op1, Op2, Size}, VT, MMO);
Chain = Res.getValue(1);
Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
- unsigned Imm = Op2->getAsConstantVal();
+ unsigned Imm = Op2->getAsZExtVal();
if (Imm)
Res = DAG.getNode(ISD::SHL, DL, VT, Res,
DAG.getShiftAmountConstant(Imm, VT, DL));
@@ -41795,7 +41795,7 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
- unsigned ShAmt = Op1->getAsConstantVal();
+ unsigned ShAmt = Op1->getAsZExtVal();
if (ShAmt >= BitWidth)
break;
@@ -42580,7 +42580,7 @@ static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
APInt Imm(SrcVT.getVectorNumElements(), 0);
for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Op.getOperand(Idx);
- if (!In.isUndef() && (In->getAsConstantVal() & 0x1))
+ if (!In.isUndef() && (In->getAsZExtVal() & 0x1))
Imm.setBit(Idx);
}
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
@@ -53258,7 +53258,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
if (Index.getOpcode() == ISD::ADD &&
Index.getValueType().getVectorElementType() == PtrVT &&
isa<ConstantSDNode>(Scale)) {
- uint64_t ScaleAmt = Scale->getAsConstantVal();
+ uint64_t ScaleAmt = Scale->getAsZExtVal();
if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
BitVector UndefElts;
if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
diff --git a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index cc95c3f5db343c..1535eb622da689 100644
--- a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -142,7 +142,7 @@ void XCoreDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
default: break;
case ISD::Constant: {
- uint64_t Val = N->getAsConstantVal();
+ uint64_t Val = N->getAsZExtVal();
if (immMskBitp(N)) {
// Transformation function: get the size of a mask
// Look for the first non-zero bit