[llvm] [RFC][SelectionDAG] Add and use SDNode::getAsConstantVal() helper (PR #76710)
Alex Bradbury via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 2 03:31:54 PST 2024
https://github.com/asb created https://github.com/llvm/llvm-project/pull/76710
This follows on from #76708, allowing
`cast<ConstantSDNode>(N)->getZExtValue()` to be replaced with just `N->getAsConstantVal()`.
Introduced via `git grep -l "cast<ConstantSDNode>\(.*\).*getZExtValue" |
xargs sed -E -i 's/cast<ConstantSDNode>\((.*)\)->getZExtValue/\1->getAsConstantVal/'` and then using `git clang-format` on the result.
To me, this helper fills a logical gap given that the `getConstantOperandVal` helper already exists, but I'd very much welcome alternate viewpoints.
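For concreteness, here is roughly the shape of helper being proposed (illustrative sketch only; the exact name, signature, and placement come from the actual patch, most likely alongside the existing helpers in SelectionDAGNodes.h):

    /// Sketch: return this node's value, assuming it is a ConstantSDNode,
    /// zero-extended to uint64_t.
    uint64_t SDNode::getAsConstantVal() const {
      return cast<ConstantSDNode>(this)->getZExtValue();
    }

so that a call site like

    unsigned Imm = cast<ConstantSDNode>(N)->getZExtValue();

can instead be written as

    unsigned Imm = N->getAsConstantVal();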
From 1fc877226cc6215ffab72df3a6aafad82fa1b4da Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Tue, 2 Jan 2024 10:48:12 +0000
Subject: [PATCH 1/2] [llvm][NFC] Use SDValue::getConstantOperandVal(i) where
possible
This helper function shortens examples like
`cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();` to
`Node->getConstantOperandVal(1);`.
Implemented with:

`git grep -l "cast<ConstantSDNode>\(.*->getOperand\(.*\)\)->getZExtValue\(\)" | xargs sed -E -i 's/cast<ConstantSDNode>\((.*)->getOperand\((.*)\)\)->getZExtValue\(\)/\1->getConstantOperandVal(\2)/'`

and

`git grep -l "cast<ConstantSDNode>\(.*\.getOperand\(.*\)\)->getZExtValue\(\)" | xargs sed -E -i 's/cast<ConstantSDNode>\((.*)\.getOperand\((.*)\)\)->getZExtValue\(\)/\1.getConstantOperandVal(\2)/'`

A couple of simple manual fixes were also needed, and the result was then processed by `git clang-format`.
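(For reference, the existing helper targeted by these rewrites is roughly equivalent to the following; this is a sketch based on SelectionDAGNodes.h and is not part of this patch:

    /// Helper returning the integer value of the ConstantSDNode operand Num.
    uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
      return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
    }

so `Node->getConstantOperandVal(1)` expands to the longhand form shown above.)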
---
.../lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 9 +-
.../CodeGen/SelectionDAG/ScheduleDAGFast.cpp | 3 +-
.../SelectionDAG/ScheduleDAGRRList.cpp | 8 +-
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 2 +-
.../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 3 +-
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 39 +++--
.../Target/AArch64/AArch64ISelLowering.cpp | 46 +++---
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 8 +-
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 18 ++-
llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 6 +-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 55 ++++---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 4 +-
llvm/lib/Target/ARC/ARCISelLowering.cpp | 2 +-
llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 9 +-
llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 28 ++--
llvm/lib/Target/ARM/ARMISelLowering.cpp | 65 ++++-----
llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/AVR/AVRISelLowering.cpp | 5 +-
llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp | 4 +-
llvm/lib/Target/CSKY/CSKYISelLowering.cpp | 4 +-
.../Target/Hexagon/HexagonISelDAGToDAG.cpp | 34 ++---
.../Target/Hexagon/HexagonISelDAGToDAGHVX.cpp | 6 +-
.../Target/Hexagon/HexagonISelLowering.cpp | 9 +-
.../Target/Hexagon/HexagonISelLoweringHVX.cpp | 4 +-
llvm/lib/Target/Lanai/LanaiISelLowering.cpp | 4 +-
.../LoongArch/LoongArchISelLowering.cpp | 38 ++---
llvm/lib/Target/M68k/M68kISelLowering.cpp | 5 +-
llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 6 +-
llvm/lib/Target/Mips/MipsISelLowering.cpp | 4 +-
llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp | 8 +-
llvm/lib/Target/Mips/MipsSEISelLowering.cpp | 6 +-
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 6 +-
llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 5 +-
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 134 ++++++++----------
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 6 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 +-
llvm/lib/Target/Sparc/SparcISelLowering.cpp | 4 +-
.../Target/SystemZ/SystemZISelLowering.cpp | 76 +++++-----
llvm/lib/Target/SystemZ/SystemZOperators.td | 4 +-
llvm/lib/Target/VE/VEISelLowering.cpp | 14 +-
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 2 +-
llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/XCore/XCoreISelLowering.cpp | 54 +++----
44 files changed, 355 insertions(+), 406 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index a27febe15db832..34fa1f5a7ed1fb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -495,7 +495,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
// EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
// constraints on the %dst register, COPY can target all legal register
// classes.
- unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ unsigned SubIdx = Node->getConstantOperandVal(1);
const TargetRegisterClass *TRC =
TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
@@ -611,7 +611,7 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
// Create the new VReg in the destination class and emit a copy.
- unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ unsigned DstRCIdx = Node->getConstantOperandVal(1);
const TargetRegisterClass *DstRC =
TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
Register NewVReg = MRI->createVirtualRegister(DstRC);
@@ -629,7 +629,7 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
void InstrEmitter::EmitRegSequence(SDNode *Node,
DenseMap<SDValue, Register> &VRBaseMap,
bool IsClone, bool IsCloned) {
- unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
+ unsigned DstRCIdx = Node->getConstantOperandVal(0);
const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
@@ -1309,8 +1309,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
// Add all of the operand registers to the instruction.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
- unsigned Flags =
- cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+ unsigned Flags = Node->getConstantOperandVal(i);
const InlineAsm::Flag F(Flags);
const unsigned NumVals = F.getNumOperandRegisters();
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index f73ddfee2b90f8..e3acb58327a8c1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -492,8 +492,7 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
--NumOps; // Ignore the glue operand.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
- unsigned Flags =
- cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+ unsigned Flags = Node->getConstantOperandVal(i);
const InlineAsm::Flag F(Flags);
unsigned NumVals = F.getNumOperandRegisters();
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 47c137d2bcad73..dcecb2e0e7fafc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -331,7 +331,7 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
unsigned Opcode = Node->getMachineOpcode();
if (Opcode == TargetOpcode::REG_SEQUENCE) {
- unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
+ unsigned DstRCIdx = Node->getConstantOperandVal(0);
const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
RegClass = RC->getID();
Cost = RegSequenceCost;
@@ -1369,8 +1369,7 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
--NumOps; // Ignore the glue operand.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
- unsigned Flags =
- cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+ unsigned Flags = Node->getConstantOperandVal(i);
const InlineAsm::Flag F(Flags);
unsigned NumVals = F.getNumOperandRegisters();
@@ -2298,8 +2297,7 @@ void RegReductionPQBase::unscheduledNode(SUnit *SU) {
continue;
}
if (POpc == TargetOpcode::REG_SEQUENCE) {
- unsigned DstRCIdx =
- cast<ConstantSDNode>(PN->getOperand(0))->getZExtValue();
+ unsigned DstRCIdx = PN->getConstantOperandVal(0);
const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
unsigned RCId = RC->getID();
// REG_SEQUENCE is untyped, so getRepRegClassCostFor could not be used
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 81facf92e55ae9..eb4deb6306fd5f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7408,7 +7408,7 @@ static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
Src.getOperand(1).getOpcode() == ISD::Constant) {
G = cast<GlobalAddressSDNode>(Src.getOperand(0));
- SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
+ SrcDelta = Src.getConstantOperandVal(1);
}
if (!G)
return false;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 3dc6e4bbcf46ba..f28211ac113ca9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -4181,8 +4181,7 @@ void SelectionDAGISel::CannotYetSelect(SDNode *N) {
Msg << "\nIn function: " << MF->getName();
} else {
bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
- unsigned iid =
- cast<ConstantSDNode>(N->getOperand(HasInputChain))->getZExtValue();
+ unsigned iid = N->getConstantOperandVal(HasInputChain);
if (iid < Intrinsic::num_intrinsics)
Msg << "intrinsic %" << Intrinsic::getBaseName((Intrinsic::ID)iid);
else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 463ec41b94e974..476d99c2a7e045 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1950,7 +1950,7 @@ void AArch64DAGToDAGISel::SelectMultiVectorMove(SDNode *N, unsigned NumVecs,
unsigned BaseReg, unsigned Op) {
unsigned TileNum = 0;
if (BaseReg != AArch64::ZA)
- TileNum = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ TileNum = N->getConstantOperandVal(2);
if (!SelectSMETile(BaseReg, TileNum))
return;
@@ -2145,8 +2145,7 @@ void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
const EVT ResTys[] = {MVT::Untyped, MVT::Other};
- unsigned LaneNo =
- cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
+ unsigned LaneNo = N->getConstantOperandVal(NumVecs + 2);
SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
N->getOperand(NumVecs + 3), N->getOperand(0)};
@@ -2185,8 +2184,7 @@ void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
const EVT ResTys[] = {MVT::i64, // Type of the write back register
RegSeq->getValueType(0), MVT::Other};
- unsigned LaneNo =
- cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
+ unsigned LaneNo = N->getConstantOperandVal(NumVecs + 1);
SDValue Ops[] = {RegSeq,
CurDAG->getTargetConstant(LaneNo, dl,
@@ -2237,8 +2235,7 @@ void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
SDValue RegSeq = createQTuple(Regs);
- unsigned LaneNo =
- cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
+ unsigned LaneNo = N->getConstantOperandVal(NumVecs + 2);
SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
N->getOperand(NumVecs + 3), N->getOperand(0)};
@@ -2269,8 +2266,7 @@ void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
const EVT ResTys[] = {MVT::i64, // Type of the write back register
MVT::Other};
- unsigned LaneNo =
- cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
+ unsigned LaneNo = N->getConstantOperandVal(NumVecs + 1);
SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
N->getOperand(NumVecs + 2), // Base Register
@@ -2576,8 +2572,8 @@ static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
case AArch64::UBFMXri:
Opc = NOpc;
Opd0 = N->getOperand(0);
- Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
- Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
+ Immr = N->getConstantOperandVal(1);
+ Imms = N->getConstantOperandVal(2);
return true;
}
// Unreachable
@@ -3877,7 +3873,7 @@ bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
assert(isa<ConstantSDNode>(N->getOperand(2)) &&
"Expected a constant integer expression.");
unsigned Reg = PMapper->Encoding;
- uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ uint64_t Immed = N->getConstantOperandVal(2);
CurDAG->SelectNodeTo(
N, State, MVT::Other, CurDAG->getTargetConstant(Reg, DL, MVT::i32),
CurDAG->getTargetConstant(Immed, DL, MVT::i16), N->getOperand(0));
@@ -4173,8 +4169,7 @@ bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
SDValue IRG_SP = N->getOperand(2);
if (IRG_SP->getOpcode() != ISD::INTRINSIC_W_CHAIN ||
- cast<ConstantSDNode>(IRG_SP->getOperand(1))->getZExtValue() !=
- Intrinsic::aarch64_irg_sp) {
+ IRG_SP->getConstantOperandVal(1) != Intrinsic::aarch64_irg_sp) {
return false;
}
@@ -4183,7 +4178,7 @@ bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {
int FI = cast<FrameIndexSDNode>(N->getOperand(1))->getIndex();
SDValue FiOp = CurDAG->getTargetFrameIndex(
FI, TLI->getPointerTy(CurDAG->getDataLayout()));
- int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
+ int TagOffset = N->getConstantOperandVal(3);
SDNode *Out = CurDAG->getMachineNode(
AArch64::TAGPstack, DL, MVT::i64,
@@ -4203,7 +4198,7 @@ void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {
// General case for unrelated pointers in Op1 and Op2.
SDLoc DL(N);
- int TagOffset = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
+ int TagOffset = N->getConstantOperandVal(3);
SDNode *N1 = CurDAG->getMachineNode(AArch64::SUBP, DL, MVT::i64,
{N->getOperand(1), N->getOperand(2)});
SDNode *N2 = CurDAG->getMachineNode(AArch64::ADDXrr, DL, MVT::i64,
@@ -4219,7 +4214,7 @@ bool AArch64DAGToDAGISel::trySelectCastFixedLengthToScalableVector(SDNode *N) {
assert(N->getOpcode() == ISD::INSERT_SUBVECTOR && "Invalid Node!");
// Bail when not a "cast" like insert_subvector.
- if (cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() != 0)
+ if (N->getConstantOperandVal(2) != 0)
return false;
if (!N->getOperand(0).isUndef())
return false;
@@ -4250,7 +4245,7 @@ bool AArch64DAGToDAGISel::trySelectCastScalableToFixedLengthVector(SDNode *N) {
assert(N->getOpcode() == ISD::EXTRACT_SUBVECTOR && "Invalid Node!");
// Bail when not a "cast" like extract_subvector.
- if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 0)
+ if (N->getConstantOperandVal(1) != 0)
return false;
// Bail when normal isel can do the job.
@@ -4422,7 +4417,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
return;
}
case ISD::INTRINSIC_W_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ unsigned IntNo = Node->getConstantOperandVal(1);
switch (IntNo) {
default:
break;
@@ -5179,7 +5174,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
}
} break;
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
+ unsigned IntNo = Node->getConstantOperandVal(0);
switch (IntNo) {
default:
break;
@@ -5782,7 +5777,7 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
break;
}
case ISD::INTRINSIC_VOID: {
- unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ unsigned IntNo = Node->getConstantOperandVal(1);
if (Node->getNumOperands() >= 3)
VT = Node->getOperand(2)->getValueType(0);
switch (IntNo) {
@@ -6806,7 +6801,7 @@ static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
if (Opcode != ISD::INTRINSIC_VOID && Opcode != ISD::INTRINSIC_W_CHAIN)
return EVT();
- switch (cast<ConstantSDNode>(Root->getOperand(1))->getZExtValue()) {
+ switch (Root->getConstantOperandVal(1)) {
default:
return EVT();
case Intrinsic::aarch64_sme_ldr:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index dffe69bdb900db..c000f33e2c5415 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2196,7 +2196,7 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
}
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_VOID: {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntNo = Op.getConstantOperandVal(0);
switch (IntNo) {
default:
break;
@@ -3922,9 +3922,9 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
// 4: bool isDataCache
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
SDLoc DL(Op);
- unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
- unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
- unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+ unsigned IsWrite = Op.getConstantOperandVal(2);
+ unsigned Locality = Op.getConstantOperandVal(3);
+ unsigned IsData = Op.getConstantOperandVal(4);
bool IsStream = !Locality;
// When the locality number is set
@@ -4973,10 +4973,10 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SDValue Chain = Op.getOperand(0);
SDValue Addr = Op.getOperand(2);
- unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
- unsigned Locality = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
- unsigned IsStream = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
- unsigned IsData = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
+ unsigned IsWrite = Op.getConstantOperandVal(3);
+ unsigned Locality = Op.getConstantOperandVal(4);
+ unsigned IsStream = Op.getConstantOperandVal(5);
+ unsigned IsData = Op.getConstantOperandVal(6);
unsigned PrfOp = (IsWrite << 4) | // Load/Store bit
(!IsData << 3) | // IsDataCache bit
(Locality << 1) | // Cache level bits
@@ -5039,7 +5039,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntNo = Op.getConstantOperandVal(0);
SDLoc dl(Op);
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
@@ -5218,8 +5218,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(AArch64ISD::SPLICE, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::aarch64_sve_ptrue:
- return getPTrue(DAG, dl, Op.getValueType(),
- cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
+ return getPTrue(DAG, dl, Op.getValueType(), Op.getConstantOperandVal(1));
case Intrinsic::aarch64_sve_clz:
return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, dl, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
@@ -6478,7 +6477,7 @@ static unsigned getIntrinsicID(const SDNode *N) {
default:
return Intrinsic::not_intrinsic;
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(0);
if (IID < Intrinsic::num_intrinsics)
return IID;
return Intrinsic::not_intrinsic;
@@ -10009,7 +10008,7 @@ SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
SDValue FrameAddr =
DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, MVT::i64);
while (Depth--)
@@ -10076,7 +10075,7 @@ SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
SDValue ReturnAddress;
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
@@ -10942,7 +10941,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
// Update the minimum and maximum lane number seen.
- unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
+ unsigned EltNo = V.getConstantOperandVal(1);
Source->MinElt = std::min(Source->MinElt, EltNo);
Source->MaxElt = std::max(Source->MaxElt, EltNo);
}
@@ -13329,7 +13328,7 @@ SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
"Only cases that extract a fixed length vector are supported!");
EVT InVT = Op.getOperand(0).getValueType();
- unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned Idx = Op.getConstantOperandVal(1);
unsigned Size = Op.getValueSizeInBits();
// If we don't have legal types yet, do nothing
@@ -13375,7 +13374,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
"Only expect to lower inserts into scalable vectors!");
EVT InVT = Op.getOperand(1).getValueType();
- unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Idx = Op.getConstantOperandVal(2);
SDValue Vec0 = Op.getOperand(0);
SDValue Vec1 = Op.getOperand(1);
@@ -18399,8 +18398,8 @@ static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
// TODO: we want the operands of the Cmp not the csel
SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
SetCCInfo.IsAArch64 = true;
- SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
- cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+ SetCCInfo.Info.AArch64.CC =
+ static_cast<AArch64CC::CondCode>(Op.getConstantOperandVal(2));
// Check that the operands matches the constraints:
// (1) Both operands must be constants.
@@ -21585,7 +21584,7 @@ static SDValue performNEONPostLDSTCombine(SDNode *N,
bool IsDupOp = false;
unsigned NewOpc = 0;
unsigned NumVecs = 0;
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(1);
switch (IntNo) {
default: llvm_unreachable("unexpected intrinsic for Neon base update");
case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post;
@@ -22501,7 +22500,7 @@ static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
static SDValue performTBZCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) {
- unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ unsigned Bit = N->getConstantOperandVal(2);
bool Invert = false;
SDValue TestSrc = N->getOperand(1);
SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG);
@@ -23789,7 +23788,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performMULLCombine(N, DCI, DAG);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ switch (N->getConstantOperandVal(1)) {
case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
return combineSVEPrefetchVecBaseImmOff(N, DAG, 1 /*=ScalarSizeInBytes*/);
case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
@@ -23940,8 +23939,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performScatterStoreCombine(N, DAG, AArch64ISD::SST1_IMM_PRED);
case Intrinsic::aarch64_rndr:
case Intrinsic::aarch64_rndrrs: {
- unsigned IntrinsicID =
- cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntrinsicID = N->getConstantOperandVal(1);
auto Register =
(IntrinsicID == Intrinsic::aarch64_rndr ? AArch64SysReg::RNDR
: AArch64SysReg::RNDRRS);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index b0eac567ec9f18..28604af2cdb3ce 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -377,7 +377,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
return Subtarget->getRegisterInfo()->getRegClass(RegClass);
}
case AMDGPU::REG_SEQUENCE: {
- unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned RCID = N->getConstantOperandVal(0);
const TargetRegisterClass *SuperRC =
Subtarget->getRegisterInfo()->getRegClass(RCID);
@@ -2672,7 +2672,7 @@ void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
}
void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
- unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntrID = N->getConstantOperandVal(1);
switch (IntrID) {
case Intrinsic::amdgcn_ds_append:
case Intrinsic::amdgcn_ds_consume: {
@@ -2690,7 +2690,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
}
void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
- unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IntrID = N->getConstantOperandVal(0);
unsigned Opcode;
switch (IntrID) {
case Intrinsic::amdgcn_wqm:
@@ -2731,7 +2731,7 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
}
void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
- unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntrID = N->getConstantOperandVal(1);
switch (IntrID) {
case Intrinsic::amdgcn_ds_gws_init:
case Intrinsic::amdgcn_ds_gws_barrier:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 541a5b62450ddf..8fbc90a6db9fdc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -682,7 +682,7 @@ static bool hasSourceMods(const SDNode *N) {
case ISD::BITCAST:
return false;
case ISD::INTRINSIC_WO_CHAIN: {
- switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
+ switch (N->getConstantOperandVal(0)) {
case Intrinsic::amdgcn_interp_p1:
case Intrinsic::amdgcn_interp_p2:
case Intrinsic::amdgcn_interp_mov:
@@ -837,7 +837,7 @@ bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
case ISD::TokenFactor:
return true;
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IntrID = N->getConstantOperandVal(0);
switch (IntrID) {
case Intrinsic::amdgcn_readfirstlane:
case Intrinsic::amdgcn_readlane:
@@ -1489,7 +1489,7 @@ SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
SmallVector<SDValue, 8> Args;
- unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned Start = Op.getConstantOperandVal(1);
EVT VT = Op.getValueType();
EVT SrcVT = Op.getOperand(0).getValueType();
@@ -2502,8 +2502,7 @@ static bool valueIsKnownNeverF32Denorm(SDValue Src) {
case ISD::FFREXP:
return true;
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntrinsicID =
- cast<ConstantSDNode>(Src.getOperand(0))->getZExtValue();
+ unsigned IntrinsicID = Src.getConstantOperandVal(0);
switch (IntrinsicID) {
case Intrinsic::amdgcn_frexp_mant:
return true;
@@ -3601,7 +3600,7 @@ static SDValue simplifyMul24(SDNode *Node24,
SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
unsigned NewOpcode = Node24->getOpcode();
if (IsIntrin) {
- unsigned IID = cast<ConstantSDNode>(Node24->getOperand(0))->getZExtValue();
+ unsigned IID = Node24->getConstantOperandVal(0);
switch (IID) {
case Intrinsic::amdgcn_mul_i24:
NewOpcode = AMDGPUISD::MUL_I24;
@@ -3821,7 +3820,7 @@ SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
SDNode *N, DAGCombinerInfo &DCI) const {
- unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(0);
switch (IID) {
case Intrinsic::amdgcn_mul_i24:
case Intrinsic::amdgcn_mul_u24:
@@ -5652,7 +5651,7 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IID = Op.getConstantOperandVal(0);
switch (IID) {
case Intrinsic::amdgcn_workitem_id_x:
case Intrinsic::amdgcn_workitem_id_y:
@@ -5834,8 +5833,7 @@ bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
return SNaN;
}
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntrinsicID
- = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntrinsicID = Op.getConstantOperandVal(0);
// TODO: Handle more intrinsics
switch (IntrinsicID) {
case Intrinsic::amdgcn_cubeid:
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index c1ba9c514874eb..9a2fb0bc37b2c0 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -424,8 +424,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
return lowerADDRSPACECAST(Op, DAG);
case ISD::INTRINSIC_VOID: {
SDValue Chain = Op.getOperand(0);
- unsigned IntrinsicID =
- cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned IntrinsicID = Op.getConstantOperandVal(1);
switch (IntrinsicID) {
case Intrinsic::r600_store_swizzle: {
SDLoc DL(Op);
@@ -449,8 +448,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntrinsicID =
- cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntrinsicID = Op.getConstantOperandVal(0);
EVT VT = Op.getValueType();
SDLoc DL(Op);
switch (IntrinsicID) {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f3547db9e9bd94..2cb08e025dd901 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5388,7 +5388,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
// Get the rounding mode from the last operand
- int RoundMode = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ int RoundMode = Op.getConstantOperandVal(1);
if (RoundMode == (int)RoundingMode::TowardPositive)
Opc = AMDGPUISD::FPTRUNC_ROUND_UPWARD;
else if (RoundMode == (int)RoundingMode::TowardNegative)
@@ -5698,7 +5698,7 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N,
return;
}
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(0);
switch (IID) {
case Intrinsic::amdgcn_make_buffer_rsrc:
Results.push_back(lowerPointerAsRsrcIntrin(N, DAG));
@@ -5836,7 +5836,7 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) {
unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
- switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
+ switch (Intr->getConstantOperandVal(1)) {
case Intrinsic::amdgcn_if:
return AMDGPUISD::IF;
case Intrinsic::amdgcn_else:
@@ -5985,7 +5985,7 @@ SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
// Checking the depth
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
+ if (Op.getConstantOperandVal(0) != 0)
return DAG.getConstant(0, DL, VT);
MachineFunction &MF = DAG.getMachineFunction();
@@ -7634,7 +7634,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
- unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntrinsicID = Op.getConstantOperandVal(0);
// TODO: Should this propagate fast-math-flags?
@@ -7788,7 +7788,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(),
SDLoc(Op), MVT::i32);
case Intrinsic::amdgcn_s_buffer_load: {
- unsigned CPol = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+ unsigned CPol = Op.getConstantOperandVal(3);
if (CPol & ~((Subtarget->getGeneration() >= AMDGPUSubtarget::GFX12)
? AMDGPU::CPol::ALL
: AMDGPU::CPol::ALL_pregfx12))
@@ -8038,7 +8038,7 @@ SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
- unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned IntrID = Op.getConstantOperandVal(1);
SDLoc DL(Op);
switch (IntrID) {
@@ -8134,8 +8134,8 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
}
case Intrinsic::amdgcn_buffer_load:
case Intrinsic::amdgcn_buffer_load_format: {
- unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
- unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
+ unsigned Glc = Op.getConstantOperandVal(5);
+ unsigned Slc = Op.getConstantOperandVal(6);
unsigned IdxEn = getIdxEn(Op.getOperand(3));
SDValue Ops[] = {
Op.getOperand(0), // Chain
@@ -8223,10 +8223,10 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
EVT LoadVT = Op.getValueType();
auto SOffset = selectSOffset(Op.getOperand(5), DAG, Subtarget);
- unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
- unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
- unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
- unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
+ unsigned Dfmt = Op.getConstantOperandVal(7);
+ unsigned Nfmt = Op.getConstantOperandVal(8);
+ unsigned Glc = Op.getConstantOperandVal(9);
+ unsigned Slc = Op.getConstantOperandVal(10);
unsigned IdxEn = getIdxEn(Op.getOperand(3));
SDValue Ops[] = {
Op.getOperand(0), // Chain
@@ -8313,7 +8313,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_buffer_atomic_or:
case Intrinsic::amdgcn_buffer_atomic_xor:
case Intrinsic::amdgcn_buffer_atomic_fadd: {
- unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
+ unsigned Slc = Op.getConstantOperandVal(6);
unsigned IdxEn = getIdxEn(Op.getOperand(4));
SDValue Ops[] = {
Op.getOperand(0), // Chain
@@ -8474,7 +8474,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC);
case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
- unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
+ unsigned Slc = Op.getConstantOperandVal(7);
unsigned IdxEn = getIdxEn(Op.getOperand(5));
SDValue Ops[] = {
Op.getOperand(0), // Chain
@@ -8878,7 +8878,7 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
SDValue Chain = Op.getOperand(0);
- unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned IntrinsicID = Op.getConstantOperandVal(1);
MachineFunction &MF = DAG.getMachineFunction();
switch (IntrinsicID) {
@@ -8943,10 +8943,10 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
if (IsD16)
VData = handleD16VData(VData, DAG);
- unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
- unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
- unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
- unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
+ unsigned Dfmt = Op.getConstantOperandVal(8);
+ unsigned Nfmt = Op.getConstantOperandVal(9);
+ unsigned Glc = Op.getConstantOperandVal(10);
+ unsigned Slc = Op.getConstantOperandVal(11);
unsigned IdxEn = getIdxEn(Op.getOperand(4));
SDValue Ops[] = {
Chain,
@@ -9029,8 +9029,8 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
if (IsD16)
VData = handleD16VData(VData, DAG);
- unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
- unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
+ unsigned Glc = Op.getConstantOperandVal(6);
+ unsigned Slc = Op.getConstantOperandVal(7);
unsigned IdxEn = getIdxEn(Op.getOperand(4));
SDValue Ops[] = {
Chain,
@@ -12069,8 +12069,7 @@ bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
return false;
}
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntrinsicID
- = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntrinsicID = Op.getConstantOperandVal(0);
// TODO: Handle more intrinsics
switch (IntrinsicID) {
case Intrinsic::amdgcn_cvt_pkrtz:
@@ -15008,7 +15007,7 @@ void SITargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
unsigned Opc = Op.getOpcode();
switch (Opc) {
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IID = Op.getConstantOperandVal(0);
switch (IID) {
case Intrinsic::amdgcn_mbcnt_lo:
case Intrinsic::amdgcn_mbcnt_hi: {
@@ -15251,11 +15250,9 @@ bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode *N,
case ISD::CALLSEQ_END:
return true;
case ISD::INTRINSIC_WO_CHAIN:
- return AMDGPU::isIntrinsicSourceOfDivergence(
- cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
+ return AMDGPU::isIntrinsicSourceOfDivergence(N->getConstantOperandVal(0));
case ISD::INTRINSIC_W_CHAIN:
- return AMDGPU::isIntrinsicSourceOfDivergence(
- cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
+ return AMDGPU::isIntrinsicSourceOfDivergence(N->getConstantOperandVal(1));
case AMDGPUISD::ATOMIC_CMP_SWAP:
case AMDGPUISD::ATOMIC_LOAD_FMIN:
case AMDGPUISD::ATOMIC_LOAD_FMAX:
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 2fb3957a1ca9dc..aae6f2e842fd1a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -273,8 +273,8 @@ bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
// subtract the index by one.
Offset0Idx -= get(Opc0).NumDefs;
Offset1Idx -= get(Opc1).NumDefs;
- Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
- Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
+ Offset0 = Load0->getConstantOperandVal(Offset0Idx);
+ Offset1 = Load1->getConstantOperandVal(Offset1Idx);
return true;
}
diff --git a/llvm/lib/Target/ARC/ARCISelLowering.cpp b/llvm/lib/Target/ARC/ARCISelLowering.cpp
index 5d9a366f5ed540..2265f5db6737e8 100644
--- a/llvm/lib/Target/ARC/ARCISelLowering.cpp
+++ b/llvm/lib/Target/ARC/ARCISelLowering.cpp
@@ -751,7 +751,7 @@ SDValue ARCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc dl(Op);
- assert(cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0 &&
+ assert(Op.getConstantOperandVal(0) == 0 &&
"Only support lowering frame addr of current frame.");
Register FrameReg = ARI.getFrameRegister(MF);
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index a0776296b8ebc7..ef02dc99701149 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -4499,8 +4499,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
default: break;
case ARM::LDRrs:
case ARM::LDRBrs: {
- unsigned ShOpVal =
- cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
+ unsigned ShOpVal = DefNode->getConstantOperandVal(2);
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (ShImm == 0 ||
(ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
@@ -4512,8 +4511,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
- unsigned ShAmt =
- cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
+ unsigned ShAmt = DefNode->getConstantOperandVal(2);
if (ShAmt == 0 || ShAmt == 2)
Latency = *Latency - 1;
break;
@@ -4526,8 +4524,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
default: break;
case ARM::LDRrs:
case ARM::LDRBrs: {
- unsigned ShOpVal =
- cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
+ unsigned ShOpVal = DefNode->getConstantOperandVal(2);
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (ShImm == 0 ||
((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 984d8d3e0b08c4..adc429b61bbcce 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2422,8 +2422,7 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
SDValue Chain = N->getOperand(0);
- unsigned Lane =
- cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
+ unsigned Lane = N->getConstantOperandVal(Vec0Idx + NumVecs);
EVT VT = N->getOperand(Vec0Idx).getValueType();
bool is64BitVector = VT.is64BitVector();
@@ -2587,7 +2586,7 @@ void ARMDAGToDAGISel::SelectMVE_WB(SDNode *N, const uint16_t *Opcodes,
Ops.push_back(N->getOperand(2)); // vector of base addresses
- int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
+ int32_t ImmValue = N->getConstantOperandVal(3);
Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset
if (Predicated)
@@ -2622,7 +2621,7 @@ void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode,
// The shift count
if (Immediate) {
- int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
+ int32_t ImmValue = N->getConstantOperandVal(3);
Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
} else {
Ops.push_back(N->getOperand(3));
@@ -2630,7 +2629,7 @@ void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode,
// The immediate saturation operand, if any
if (HasSaturationOperand) {
- int32_t SatOp = cast<ConstantSDNode>(N->getOperand(4))->getZExtValue();
+ int32_t SatOp = N->getConstantOperandVal(4);
int SatBit = (SatOp == 64 ? 0 : 1);
Ops.push_back(getI32Imm(SatBit, Loc));
}
@@ -2685,7 +2684,7 @@ void ARMDAGToDAGISel::SelectMVE_VSHLC(SDNode *N, bool Predicated) {
// and then an immediate shift count
Ops.push_back(N->getOperand(1));
Ops.push_back(N->getOperand(2));
- int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
+ int32_t ImmValue = N->getConstantOperandVal(3);
Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
if (Predicated)
@@ -4138,14 +4137,13 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
if (InGlue.getOpcode() == ARMISD::CMPZ) {
if (InGlue.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
SDValue Int = InGlue.getOperand(0);
- uint64_t ID = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
+ uint64_t ID = Int->getConstantOperandVal(1);
// Handle low-overhead loops.
if (ID == Intrinsic::loop_decrement_reg) {
SDValue Elements = Int.getOperand(2);
- SDValue Size = CurDAG->getTargetConstant(
- cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl,
- MVT::i32);
+ SDValue Size = CurDAG->getTargetConstant(Int.getConstantOperandVal(3),
+ dl, MVT::i32);
SDValue Args[] = { Elements, Size, Int.getOperand(0) };
SDNode *LoopDec =
@@ -4715,7 +4713,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(1);
switch (IntNo) {
default:
break;
@@ -4732,9 +4730,9 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::MRRC : ARM::MRRC2);
SmallVector<SDValue, 5> Ops;
- Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(), dl)); /* coproc */
- Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(3))->getZExtValue(), dl)); /* opc */
- Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(4))->getZExtValue(), dl)); /* CRm */
+ Ops.push_back(getI32Imm(N->getConstantOperandVal(2), dl)); /* coproc */
+ Ops.push_back(getI32Imm(N->getConstantOperandVal(3), dl)); /* opc */
+ Ops.push_back(getI32Imm(N->getConstantOperandVal(4), dl)); /* CRm */
// The mrrc2 instruction in ARM doesn't allow predicates, the top 4 bits of the encoded
// instruction will always be '1111' but it is possible in assembly language to specify
@@ -5181,7 +5179,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
}
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(0);
switch (IntNo) {
default:
break;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index d00b7853816e19..cf9646a0b81ed0 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -4110,7 +4110,7 @@ SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) const {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntNo = Op.getConstantOperandVal(0);
SDLoc dl(Op);
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
@@ -4289,13 +4289,13 @@ static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
return Op.getOperand(0);
SDLoc dl(Op);
- unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
+ unsigned isRead = ~Op.getConstantOperandVal(2) & 1;
if (!isRead &&
(!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
// ARMv7 with MP extension has PLDW.
return Op.getOperand(0);
- unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+ unsigned isData = Op.getConstantOperandVal(4);
if (Subtarget->isThumb()) {
// Invert the bits.
isRead = ~isRead & 1;
@@ -4800,7 +4800,7 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) &&
LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) &&
!isSignedIntSetCC(CC)) {
- unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue();
+ unsigned Mask = LHS.getConstantOperandVal(1);
auto *RHSC = cast<ConstantSDNode>(RHS.getNode());
uint64_t RHSV = RHSC->getZExtValue();
if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
@@ -4823,9 +4823,8 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
isa<ConstantSDNode>(RHS) &&
cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
- cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
- unsigned ShiftAmt =
- cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
+ LHS.getConstantOperandVal(1) < 31) {
+ unsigned ShiftAmt = LHS.getConstantOperandVal(1) + 1;
SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
DAG.getVTList(MVT::i32, MVT::i32),
LHS.getOperand(0),
@@ -6112,7 +6111,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
EVT VT = Op.getValueType();
SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
@@ -6135,7 +6134,7 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc dl(Op); // FIXME probably not meaningful
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
Register FrameReg = ARI.getFrameRegister(MF);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
@@ -8221,7 +8220,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
// Update the minimum and maximum lane number seen.
- unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
+ unsigned EltNo = V.getConstantOperandVal(1);
Source->MinElt = std::min(Source->MinElt, EltNo);
Source->MaxElt = std::max(Source->MaxElt, EltNo);
}
@@ -9034,7 +9033,7 @@ static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
SDValue Conv =
DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
- unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Lane = Op.getConstantOperandVal(2);
unsigned LaneWidth =
getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
@@ -9097,7 +9096,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
SDValue Conv =
DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
- unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned Lane = Op.getConstantOperandVal(1);
unsigned LaneWidth =
getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv,
@@ -10682,7 +10681,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(0);
unsigned Opc = 0;
if (IntNo == Intrinsic::arm_smlald)
Opc = ARMISD::SMLALD;
@@ -14908,7 +14907,7 @@ static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) {
ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
if (!N11C)
return SDValue();
- unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ unsigned InvMask = N->getConstantOperandVal(2);
unsigned LSB = llvm::countr_zero(~InvMask);
unsigned Width = llvm::bit_width<unsigned>(~InvMask) - LSB;
assert(Width <
@@ -15448,8 +15447,7 @@ static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG,
EVT VT = N->getValueType(0);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
- ARMCC::CondCodes Cond =
- (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ ARMCC::CondCodes Cond = (ARMCC::CondCodes)N->getConstantOperandVal(2);
SDLoc dl(N);
// vcmp X, 0, cc -> vcmpz X, cc
@@ -15794,7 +15792,7 @@ static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target,
unsigned NewOpc = 0;
unsigned NumVecs = 0;
if (Target.isIntrinsic) {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(1);
switch (IntNo) {
default:
llvm_unreachable("unexpected intrinsic for Neon base update");
@@ -16254,12 +16252,10 @@ static SDValue PerformMVEVLDCombine(SDNode *N,
// For the stores, where there are multiple intrinsics we only actually want
// to post-inc the last of the them.
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
- if (IntNo == Intrinsic::arm_mve_vst2q &&
- cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1)
+ unsigned IntNo = N->getConstantOperandVal(1);
+ if (IntNo == Intrinsic::arm_mve_vst2q && N->getConstantOperandVal(5) != 1)
return SDValue();
- if (IntNo == Intrinsic::arm_mve_vst4q &&
- cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3)
+ if (IntNo == Intrinsic::arm_mve_vst4q && N->getConstantOperandVal(7) != 3)
return SDValue();
// Search for a use of the address operand that is an increment.
@@ -16381,7 +16377,7 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
return false;
unsigned NumVecs = 0;
unsigned NewOpc = 0;
- unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
+ unsigned IntNo = VLD->getConstantOperandVal(1);
if (IntNo == Intrinsic::arm_neon_vld2lane) {
NumVecs = 2;
NewOpc = ARMISD::VLD2DUP;
@@ -16397,8 +16393,7 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
// First check that all the vldN-lane uses are VDUPLANEs and that the lane
// numbers match the load.
- unsigned VLDLaneNo =
- cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
+ unsigned VLDLaneNo = VLD->getConstantOperandVal(NumVecs + 3);
for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
UI != UE; ++UI) {
// Ignore uses of the chain result.
@@ -16406,7 +16401,7 @@ static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
continue;
SDNode *User = *UI;
if (User->getOpcode() != ARMISD::VDUPLANE ||
- VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
+ VLDLaneNo != User->getConstantOperandVal(1))
return false;
}
@@ -16479,7 +16474,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N,
// Make sure the VMOV element size is not bigger than the VDUPLANE elements.
unsigned EltSize = Op.getScalarValueSizeInBits();
// The canonical VMOV for a zero vector uses a 32-bit element size.
- unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Imm = Op.getConstantOperandVal(0);
unsigned EltBits;
if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0)
EltSize = 8;
@@ -17479,7 +17474,7 @@ static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) {
SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(0);
switch (IntNo) {
default:
// Don't do anything for most intrinsics.
@@ -17669,7 +17664,7 @@ SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
case Intrinsic::arm_mve_addv: {
// Turn this intrinsic straight into the appropriate ARMISD::VADDV node,
// which allow PerformADDVecReduce to turn it into VADDLV when possible.
- bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ bool Unsigned = N->getConstantOperandVal(2);
unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs;
return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1));
}
@@ -17678,7 +17673,7 @@ SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
case Intrinsic::arm_mve_addlv_predicated: {
// Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR
// which recombines the two outputs into an i64
- bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ bool Unsigned = N->getConstantOperandVal(2);
unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ?
(Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) :
(Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps);
@@ -18193,7 +18188,7 @@ static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate);
}
case ISD::INTRINSIC_W_CHAIN: {
- unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue();
+ unsigned IntOp = N.getConstantOperandVal(1);
if (IntOp != Intrinsic::test_start_loop_iterations &&
IntOp != Intrinsic::loop_decrement_reg)
return SDValue();
@@ -18271,7 +18266,7 @@ static SDValue PerformHWLoopCombine(SDNode *N,
SDLoc dl(Int);
SelectionDAG &DAG = DCI.DAG;
SDValue Elements = Int.getOperand(2);
- unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
+ unsigned IntOp = Int->getConstantOperandVal(1);
assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR)
&& "expected single br user");
SDNode *Br = *N->use_begin();
@@ -18305,8 +18300,8 @@ static SDValue PerformHWLoopCombine(SDNode *N,
DAG.ReplaceAllUsesOfValueWith(Int.getValue(2), Int.getOperand(0));
return Res;
} else {
- SDValue Size = DAG.getTargetConstant(
- cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32);
+ SDValue Size =
+ DAG.getTargetConstant(Int.getConstantOperandVal(3), dl, MVT::i32);
SDValue Args[] = { Int.getOperand(0), Elements, Size, };
SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl,
DAG.getVTList(MVT::i32, MVT::Other), Args);
@@ -19051,7 +19046,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
}
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ switch (N->getConstantOperandVal(1)) {
case Intrinsic::arm_neon_vld1:
case Intrinsic::arm_neon_vld1x2:
case Intrinsic::arm_neon_vld1x3:
diff --git a/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp b/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp
index 196122e45ab8d5..e67a1e2ed50904 100644
--- a/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AVR/AVRISelDAGToDAG.cpp
@@ -335,7 +335,7 @@ template <> bool AVRDAGToDAGISel::select<ISD::STORE>(SDNode *N) {
return false;
}
- int CST = (int)cast<ConstantSDNode>(BasePtr.getOperand(1))->getZExtValue();
+ int CST = (int)BasePtr.getConstantOperandVal(1);
SDValue Chain = ST->getChain();
EVT VT = ST->getValue().getValueType();
SDLoc DL(N);
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index cd1dcfaea0eb17..d36bfb188ed360 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -298,8 +298,7 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
SDValue SrcHi =
DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i16, Op.getOperand(0),
DAG.getConstant(1, dl, MVT::i16));
- uint64_t ShiftAmount =
- cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ uint64_t ShiftAmount = N->getConstantOperandVal(1);
if (ShiftAmount == 16) {
// Special case these two operations because they appear to be used by the
// generic codegen parts to lower 32-bit numbers.
@@ -367,7 +366,7 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
}
}
- uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ uint64_t ShiftAmount = N->getConstantOperandVal(1);
SDValue Victim = N->getOperand(0);
switch (Op.getOpcode()) {
diff --git a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
index 909c7c005735b6..d8139958e9fcf2 100644
--- a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -193,7 +193,7 @@ void BPFDAGToDAGISel::Select(SDNode *Node) {
default:
break;
case ISD::INTRINSIC_W_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ unsigned IntNo = Node->getConstantOperandVal(1);
switch (IntNo) {
case Intrinsic::bpf_load_byte:
case Intrinsic::bpf_load_half:
@@ -469,7 +469,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
if (BaseV.getOpcode() != ISD::INTRINSIC_W_CHAIN)
return;
- unsigned IntNo = cast<ConstantSDNode>(BaseV->getOperand(1))->getZExtValue();
+ unsigned IntNo = BaseV->getConstantOperandVal(1);
uint64_t MaskV = MaskN->getZExtValue();
if (!((IntNo == Intrinsic::bpf_load_byte && MaskV == 0xFF) ||
diff --git a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
index e3b4a2dc048ab2..90f70b83a02d34 100644
--- a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
+++ b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
@@ -1219,7 +1219,7 @@ SDValue CSKYTargetLowering::LowerFRAMEADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
Register FrameReg = RI.getFrameRegister(MF);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
@@ -1240,7 +1240,7 @@ SDValue CSKYTargetLowering::LowerRETURNADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index f930015026a5cd..eb5c59672224e9 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -192,7 +192,7 @@ MachineSDNode *HexagonDAGToDAGISel::LoadInstrForLoadIntrinsic(SDNode *IntN) {
return nullptr;
SDLoc dl(IntN);
- unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
+ unsigned IntNo = IntN->getConstantOperandVal(1);
static std::map<unsigned,unsigned> LoadPciMap = {
{ Intrinsic::hexagon_circ_ldb, Hexagon::L2_loadrb_pci },
@@ -284,18 +284,18 @@ bool HexagonDAGToDAGISel::tryLoadOfLoadIntrinsic(LoadSDNode *N) {
// can provide an address of an unsigned variable to store the result of
// a sign-extending intrinsic into (or the other way around).
ISD::LoadExtType IntExt;
- switch (cast<ConstantSDNode>(C->getOperand(1))->getZExtValue()) {
- case Intrinsic::hexagon_circ_ldub:
- case Intrinsic::hexagon_circ_lduh:
- IntExt = ISD::ZEXTLOAD;
- break;
- case Intrinsic::hexagon_circ_ldw:
- case Intrinsic::hexagon_circ_ldd:
- IntExt = ISD::NON_EXTLOAD;
- break;
- default:
- IntExt = ISD::SEXTLOAD;
- break;
+ switch (C->getConstantOperandVal(1)) {
+ case Intrinsic::hexagon_circ_ldub:
+ case Intrinsic::hexagon_circ_lduh:
+ IntExt = ISD::ZEXTLOAD;
+ break;
+ case Intrinsic::hexagon_circ_ldw:
+ case Intrinsic::hexagon_circ_ldd:
+ IntExt = ISD::NON_EXTLOAD;
+ break;
+ default:
+ IntExt = ISD::SEXTLOAD;
+ break;
}
if (N->getExtensionType() != IntExt)
return false;
@@ -325,7 +325,7 @@ bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) {
return false;
const SDLoc &dl(IntN);
- unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
+ unsigned IntNo = IntN->getConstantOperandVal(1);
static const std::map<unsigned, unsigned> LoadBrevMap = {
{ Intrinsic::hexagon_L2_loadrb_pbr, Hexagon::L2_loadrb_pbr },
@@ -366,7 +366,7 @@ bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
return false;
SDLoc DL(IntN);
- unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
+ unsigned IntNo = IntN->getConstantOperandVal(1);
SmallVector<SDValue, 7> Ops;
static std::map<unsigned,unsigned> LoadNPcMap = {
@@ -641,7 +641,7 @@ void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
if (SelectNewCircIntrinsic(N))
return;
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(1);
if (IntNo == Intrinsic::hexagon_V6_vgathermw ||
IntNo == Intrinsic::hexagon_V6_vgathermw_128B ||
IntNo == Intrinsic::hexagon_V6_vgathermh ||
@@ -665,7 +665,7 @@ void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
}
void HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
- unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(0);
unsigned Bits;
switch (IID) {
case Intrinsic::hexagon_S2_vsplatrb:
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
index e08566718d7cd7..fb156f2583e8a9 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
@@ -2895,7 +2895,7 @@ void HexagonDAGToDAGISel::SelectV65GatherPred(SDNode *N) {
SDValue ImmOperand = CurDAG->getTargetConstant(0, dl, MVT::i32);
unsigned Opcode;
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(1);
switch (IntNo) {
default:
llvm_unreachable("Unexpected HVX gather intrinsic.");
@@ -2934,7 +2934,7 @@ void HexagonDAGToDAGISel::SelectV65Gather(SDNode *N) {
SDValue ImmOperand = CurDAG->getTargetConstant(0, dl, MVT::i32);
unsigned Opcode;
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(1);
switch (IntNo) {
default:
llvm_unreachable("Unexpected HVX gather intrinsic.");
@@ -2963,7 +2963,7 @@ void HexagonDAGToDAGISel::SelectV65Gather(SDNode *N) {
}
void HexagonDAGToDAGISel::SelectHVXDualOutput(SDNode *N) {
- unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(0);
SDNode *Result;
switch (IID) {
case Intrinsic::hexagon_V6_vaddcarry: {
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index a7d452e7227d79..51138091f4a558 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -669,8 +669,7 @@ HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
--NumOps; // Ignore the flag operand.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
- const InlineAsm::Flag Flags(
- cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue());
+ const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
unsigned NumVals = Flags.getNumOperandRegisters();
++i; // Skip the ID value.
@@ -729,7 +728,7 @@ SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned IntNo = Op.getConstantOperandVal(1);
// Lower the hexagon_prefetch builtin to DCFETCH, as above.
if (IntNo == Intrinsic::hexagon_prefetch) {
SDValue Addr = Op.getOperand(2);
@@ -1176,7 +1175,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
@@ -1198,7 +1197,7 @@ HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
HRI.getFrameRegister(), VT);
while (Depth--)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index db416a500f597b..665e2d79c83d10 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -2127,7 +2127,7 @@ HexagonTargetLowering::LowerHvxFunnelShift(SDValue Op,
SDValue
HexagonTargetLowering::LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const {
const SDLoc &dl(Op);
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntNo = Op.getConstantOperandVal(0);
SmallVector<SDValue> Ops(Op->ops().begin(), Op->ops().end());
auto Swap = [&](SDValue P) {
@@ -2922,7 +2922,7 @@ SDValue
HexagonTargetLowering::RemoveTLWrapper(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getOpcode() == HexagonISD::TL_EXTEND ||
Op.getOpcode() == HexagonISD::TL_TRUNCATE);
- unsigned Opc = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Opc = Op.getConstantOperandVal(2);
return DAG.getNode(Opc, SDLoc(Op), ty(Op), Op.getOperand(0));
}
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
index cbb5c2b998e27a..17d7ffb586f4ef 100644
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -1057,7 +1057,7 @@ SDValue LanaiTargetLowering::LowerRETURNADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
const unsigned Offset = -4;
@@ -1080,7 +1080,7 @@ SDValue LanaiTargetLowering::LowerFRAMEADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Lanai::FP, VT);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
while (Depth--) {
const unsigned Offset = -8;
SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 80853ee3198771..e14bbadf9ed227 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -588,7 +588,7 @@ SDValue LoongArchTargetLowering::lowerFRAMEADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
int GRLenInBytes = Subtarget.getGRLen() / 8;
while (Depth--) {
@@ -607,7 +607,7 @@ SDValue LoongArchTargetLowering::lowerRETURNADDR(SDValue Op,
return SDValue();
// Currently only support lowering return address for current frame.
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
+ if (Op.getConstantOperandVal(0) != 0) {
DAG.getContext()->emitError(
"return address can only be determined for the current frame");
return SDValue();
@@ -1263,7 +1263,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqLA64, DAG);
case Intrinsic::loongarch_csrrd_w:
case Intrinsic::loongarch_csrrd_d: {
- unsigned Imm = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Imm = Op.getConstantOperandVal(2);
return !isUInt<14>(Imm)
? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::CSRRD, DL, {GRLenVT, MVT::Other},
@@ -1271,7 +1271,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
}
case Intrinsic::loongarch_csrwr_w:
case Intrinsic::loongarch_csrwr_d: {
- unsigned Imm = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+ unsigned Imm = Op.getConstantOperandVal(3);
return !isUInt<14>(Imm)
? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::CSRWR, DL, {GRLenVT, MVT::Other},
@@ -1280,7 +1280,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
}
case Intrinsic::loongarch_csrxchg_w:
case Intrinsic::loongarch_csrxchg_d: {
- unsigned Imm = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+ unsigned Imm = Op.getConstantOperandVal(4);
return !isUInt<14>(Imm)
? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::CSRXCHG, DL, {GRLenVT, MVT::Other},
@@ -1306,7 +1306,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
{Chain, Op.getOperand(2)});
}
case Intrinsic::loongarch_lddir_d: {
- unsigned Imm = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+ unsigned Imm = Op.getConstantOperandVal(3);
return !isUInt<8>(Imm)
? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
: Op;
@@ -1314,7 +1314,7 @@ LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::loongarch_movfcsr2gr: {
if (!Subtarget.hasBasicF())
return emitIntrinsicWithChainErrorMessage(Op, ErrorMsgReqF, DAG);
- unsigned Imm = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Imm = Op.getConstantOperandVal(2);
return !isUInt<2>(Imm)
? emitIntrinsicWithChainErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::MOVFCSR2GR, DL, {VT, MVT::Other},
@@ -1460,7 +1460,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
ASRT_LE_GT_CASE(asrtgt_d)
#undef ASRT_LE_GT_CASE
case Intrinsic::loongarch_ldpte_d: {
- unsigned Imm = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+ unsigned Imm = Op.getConstantOperandVal(3);
return !Subtarget.is64Bit()
? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG)
: !isUInt<8>(Imm) ? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
@@ -1473,53 +1473,53 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
: SDValue();
case Intrinsic::loongarch_lasx_xvstelm_b:
return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
- !isUInt<5>(cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue()))
+ !isUInt<5>(Op.getConstantOperandVal(5)))
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: SDValue();
case Intrinsic::loongarch_lsx_vstelm_b:
return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
- !isUInt<4>(cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue()))
+ !isUInt<4>(Op.getConstantOperandVal(5)))
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: SDValue();
case Intrinsic::loongarch_lasx_xvstelm_h:
return (!isShiftedInt<8, 1>(
cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
- !isUInt<4>(cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue()))
+ !isUInt<4>(Op.getConstantOperandVal(5)))
? emitIntrinsicErrorMessage(
Op, "argument out of range or not a multiple of 2", DAG)
: SDValue();
case Intrinsic::loongarch_lsx_vstelm_h:
return (!isShiftedInt<8, 1>(
cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
- !isUInt<3>(cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue()))
+ !isUInt<3>(Op.getConstantOperandVal(5)))
? emitIntrinsicErrorMessage(
Op, "argument out of range or not a multiple of 2", DAG)
: SDValue();
case Intrinsic::loongarch_lasx_xvstelm_w:
return (!isShiftedInt<8, 2>(
cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
- !isUInt<3>(cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue()))
+ !isUInt<3>(Op.getConstantOperandVal(5)))
? emitIntrinsicErrorMessage(
Op, "argument out of range or not a multiple of 4", DAG)
: SDValue();
case Intrinsic::loongarch_lsx_vstelm_w:
return (!isShiftedInt<8, 2>(
cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
- !isUInt<2>(cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue()))
+ !isUInt<2>(Op.getConstantOperandVal(5)))
? emitIntrinsicErrorMessage(
Op, "argument out of range or not a multiple of 4", DAG)
: SDValue();
case Intrinsic::loongarch_lasx_xvstelm_d:
return (!isShiftedInt<8, 3>(
cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
- !isUInt<2>(cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue()))
+ !isUInt<2>(Op.getConstantOperandVal(5)))
? emitIntrinsicErrorMessage(
Op, "argument out of range or not a multiple of 8", DAG)
: SDValue();
case Intrinsic::loongarch_lsx_vstelm_d:
return (!isShiftedInt<8, 3>(
cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
- !isUInt<1>(cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue()))
+ !isUInt<1>(Op.getConstantOperandVal(5)))
? emitIntrinsicErrorMessage(
Op, "argument out of range or not a multiple of 8", DAG)
: SDValue();
@@ -1692,7 +1692,7 @@ replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG, const LoongArchSubtarget &Subtarget,
unsigned ResOp) {
const StringRef ErrorMsgOOR = "argument out of range";
- unsigned Imm = cast<ConstantSDNode>(Node->getOperand(2))->getZExtValue();
+ unsigned Imm = Node->getConstantOperandVal(2);
if (!isUInt<N>(Imm)) {
emitErrorAndReplaceIntrinsicResults(Node, Results, DAG, ErrorMsgOOR,
/*WithChain=*/false);
@@ -1995,7 +1995,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
break;
}
case Intrinsic::loongarch_csrwr_w: {
- unsigned Imm = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
+ unsigned Imm = N->getConstantOperandVal(3);
if (!isUInt<14>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
@@ -2010,7 +2010,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
break;
}
case Intrinsic::loongarch_csrxchg_w: {
- unsigned Imm = cast<ConstantSDNode>(N->getOperand(4))->getZExtValue();
+ unsigned Imm = N->getConstantOperandVal(4);
if (!isUInt<14>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index f42882dafa0956..c4d7a0dec7f390 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -2278,8 +2278,7 @@ SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
isNullConstant(Cond.getOperand(1).getOperand(0))) {
SDValue Cmp = Cond.getOperand(1);
- unsigned CondCode =
- cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
+ unsigned CondCode = Cond.getConstantOperandVal(0);
if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
(CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) {
@@ -3388,7 +3387,7 @@ SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDNode *Node = Op.getNode();
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
- unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Align = Op.getConstantOperandVal(2);
EVT VT = Node->getValueType(0);
// Chain the dynamic stack allocation so that it doesn't modify the stack
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index ee7762c296bf50..d3b59138a5a95e 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -964,7 +964,7 @@ SDValue MSP430TargetLowering::LowerShifts(SDValue Op,
if (!isa<ConstantSDNode>(N->getOperand(1)))
return Op;
- uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ uint64_t ShiftAmount = N->getConstantOperandVal(1);
// Expand the stuff into sequence of shifts.
SDValue Victim = N->getOperand(0);
@@ -1269,7 +1269,7 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
if (verifyReturnAddressArgumentIsConstant(Op, DAG))
return SDValue();
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
SDLoc dl(Op);
EVT PtrVT = Op.getValueType();
@@ -1295,7 +1295,7 @@ SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc dl(Op); // FIXME probably not meaningful
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
MSP430::R4, VT);
while (Depth--)
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index a0cab80243868e..483eba4e4f4794 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2508,7 +2508,7 @@ SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
SDValue MipsTargetLowering::
lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
// check the depth
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
+ if (Op.getConstantOperandVal(0) != 0) {
DAG.getContext()->emitError(
"return address can be determined only for current frame");
return SDValue();
@@ -2529,7 +2529,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
return SDValue();
// check the depth
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
+ if (Op.getConstantOperandVal(0) != 0) {
DAG.getContext()->emitError(
"return address can be determined only for current frame");
return SDValue();
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 8c865afd420792..0ed87ee0809a3e 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -831,8 +831,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
}
case ISD::INTRINSIC_W_CHAIN: {
- const unsigned IntrinsicOpcode =
- cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ const unsigned IntrinsicOpcode = Node->getConstantOperandVal(1);
switch (IntrinsicOpcode) {
default:
break;
@@ -885,7 +884,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
}
case ISD::INTRINSIC_WO_CHAIN: {
- switch (cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue()) {
+ switch (Node->getConstantOperandVal(0)) {
default:
break;
@@ -901,8 +900,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
}
case ISD::INTRINSIC_VOID: {
- const unsigned IntrinsicOpcode =
- cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ const unsigned IntrinsicOpcode = Node->getConstantOperandVal(1);
switch (IntrinsicOpcode) {
default:
break;
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
index 5c34067c88889a..f6ac41ed3479c3 100644
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -1528,7 +1528,7 @@ static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) {
SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
- unsigned Intrinsic = cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue();
+ unsigned Intrinsic = Op->getConstantOperandVal(0);
switch (Intrinsic) {
default:
return SDValue();
@@ -2300,7 +2300,7 @@ static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr,
SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
- unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
+ unsigned Intr = Op->getConstantOperandVal(1);
switch (Intr) {
default:
return SDValue();
@@ -2375,7 +2375,7 @@ static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr,
SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const {
- unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
+ unsigned Intr = Op->getConstantOperandVal(1);
switch (Intr) {
default:
return SDValue();
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 894a8636f45851..815c46edb6fa2a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -513,7 +513,7 @@ void NVPTXDAGToDAGISel::Select(SDNode *N) {
}
bool NVPTXDAGToDAGISel::tryIntrinsicChain(SDNode *N) {
- unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(1);
switch (IID) {
default:
return false;
@@ -730,7 +730,7 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
}
bool NVPTXDAGToDAGISel::tryIntrinsicNoChain(SDNode *N) {
- unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(0);
switch (IID) {
default:
return false;
@@ -1246,7 +1246,7 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
Op1 = N->getOperand(2);
Mem = cast<MemIntrinsicSDNode>(N);
- unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(1);
switch (IID) {
default:
return false;
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index b57d185bb638b8..ed96339240d926 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -4902,8 +4902,7 @@ bool PPCDAGToDAGISel::trySelectLoopCountIntrinsic(SDNode *N) {
return false;
if (LHS.getOperand(0).getOpcode() != ISD::INTRINSIC_W_CHAIN ||
- cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() !=
- Intrinsic::loop_decrement)
+ LHS.getOperand(0).getConstantOperandVal(1) != Intrinsic::loop_decrement)
return false;
if (!isa<ConstantSDNode>(RHS))
@@ -6011,7 +6010,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
// Op #3 is the Dest MBB
// Op #4 is the Flag.
// Prevent PPC::PRED_* from being selected into LI.
- unsigned PCC = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned PCC = N->getConstantOperandVal(1);
if (EnableBranchHint)
PCC |= getBranchHint(PCC, *FuncInfo, N->getOperand(3));
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 385b3b74c34d65..8f27e6677afa5e 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2817,8 +2817,8 @@ bool PPCTargetLowering::SelectAddressRegImm(
return true; // [r+i]
} else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
// Match LOAD (ADD (X, Lo(G))).
- assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
- && "Cannot handle constant offsets yet!");
+ assert(!N.getOperand(1).getConstantOperandVal(1) &&
+ "Cannot handle constant offsets yet!");
Disp = N.getOperand(1).getOperand(0); // The global address.
assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
@@ -3824,8 +3824,7 @@ SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
// Check all operands that may contain the LR.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
- const InlineAsm::Flag Flags(
- cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue());
+ const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
unsigned NumVals = Flags.getNumOperandRegisters();
++i; // Skip the ID value.
@@ -10442,8 +10441,7 @@ SDValue PPCTargetLowering::LowerVPERM(SDValue Op, SelectionDAG &DAG,
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
bool &isDot, const PPCSubtarget &Subtarget) {
- unsigned IntrinsicID =
- cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
+ unsigned IntrinsicID = Intrin.getConstantOperandVal(0);
CompareOpc = -1;
isDot = false;
switch (IntrinsicID) {
@@ -10728,8 +10726,7 @@ static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
- unsigned IntrinsicID =
- cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntrinsicID = Op.getConstantOperandVal(0);
SDLoc dl(Op);
@@ -10947,7 +10944,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// Unpack the result based on how the target uses it.
unsigned BitNo; // Bit # of CR6.
bool InvertBit; // Invert result?
- switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
+ switch (Op.getConstantOperandVal(1)) {
default: // Can't happen, don't crash on invalid number though.
case 0: // Return the value of the EQ bit of CR6.
BitNo = 0; InvertBit = false;
@@ -10983,7 +10980,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
// the beginning of the argument list.
int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
SDLoc DL(Op);
- switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
+ switch (Op.getConstantOperandVal(ArgStart)) {
case Intrinsic::ppc_cfence: {
assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
SDValue Val = Op.getOperand(ArgStart + 1);
@@ -11548,7 +11545,7 @@ SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
// Custom lower is only done for high or low doubleword.
- int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
+ int Idx = Op0.getConstantOperandVal(1);
if (Idx % 2 != 0)
return SDValue();
@@ -11717,8 +11714,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
break;
}
case ISD::INTRINSIC_W_CHAIN: {
- if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
- Intrinsic::loop_decrement)
+ if (N->getConstantOperandVal(1) != Intrinsic::loop_decrement)
break;
assert(N->getValueType(0) == MVT::i1 &&
@@ -11734,7 +11730,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
- switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
+ switch (N->getConstantOperandVal(0)) {
case Intrinsic::ppc_pack_longdouble:
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
N->getOperand(2), N->getOperand(1)));
@@ -13654,7 +13650,7 @@ static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
EVT VT;
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ switch (N->getConstantOperandVal(1)) {
default: return false;
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
@@ -13682,7 +13678,7 @@ static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
if (N->getOpcode() == ISD::INTRINSIC_VOID) {
EVT VT;
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ switch (N->getConstantOperandVal(1)) {
default: return false;
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
@@ -15546,8 +15542,7 @@ SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
}
static bool isStoreConditional(SDValue Intrin, unsigned &StoreWidth) {
- unsigned IntrinsicID =
- cast<ConstantSDNode>(Intrin.getOperand(1))->getZExtValue();
+ unsigned IntrinsicID = Intrin.getConstantOperandVal(1);
if (IntrinsicID == Intrinsic::ppc_stdcx)
StoreWidth = 8;
else if (IntrinsicID == Intrinsic::ppc_stwcx)
@@ -15979,7 +15974,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
break;
case ISD::INTRINSIC_WO_CHAIN: {
bool isLittleEndian = Subtarget.isLittleEndian();
- unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IID = N->getConstantOperandVal(0);
Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
: Intrinsic::ppc_altivec_lvsl);
if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
@@ -15992,36 +15987,34 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
.zext(Add.getScalarValueSizeInBits()))) {
SDNode *BasePtr = Add->getOperand(0).getNode();
for (SDNode *U : BasePtr->uses()) {
- if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
- cast<ConstantSDNode>(U->getOperand(0))->getZExtValue() == IID) {
- // We've found another LVSL/LVSR, and this address is an aligned
- // multiple of that one. The results will be the same, so use the
- // one we've just found instead.
+ if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+ U->getConstantOperandVal(0) == IID) {
+ // We've found another LVSL/LVSR, and this address is an aligned
+ // multiple of that one. The results will be the same, so use the
+ // one we've just found instead.
- return SDValue(U, 0);
- }
+ return SDValue(U, 0);
+ }
}
}
if (isa<ConstantSDNode>(Add->getOperand(1))) {
SDNode *BasePtr = Add->getOperand(0).getNode();
for (SDNode *U : BasePtr->uses()) {
- if (U->getOpcode() == ISD::ADD &&
- isa<ConstantSDNode>(U->getOperand(1)) &&
- (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
- cast<ConstantSDNode>(U->getOperand(1))->getZExtValue()) %
- (1ULL << Bits) ==
- 0) {
- SDNode *OtherAdd = U;
- for (SDNode *V : OtherAdd->uses()) {
- if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
- cast<ConstantSDNode>(V->getOperand(0))->getZExtValue() ==
- IID) {
- return SDValue(V, 0);
- }
+ if (U->getOpcode() == ISD::ADD &&
+ isa<ConstantSDNode>(U->getOperand(1)) &&
+ (Add->getConstantOperandVal(1) - U->getConstantOperandVal(1)) %
+ (1ULL << Bits) ==
+ 0) {
+ SDNode *OtherAdd = U;
+ for (SDNode *V : OtherAdd->uses()) {
+ if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+ V->getConstantOperandVal(0) == IID) {
+ return SDValue(V, 0);
}
}
}
+ }
}
}
@@ -16061,30 +16054,30 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
break;
case ISD::INTRINSIC_W_CHAIN:
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
- default:
- break;
- case Intrinsic::ppc_altivec_vsum4sbs:
- case Intrinsic::ppc_altivec_vsum4shs:
- case Intrinsic::ppc_altivec_vsum4ubs: {
- // These sum-across intrinsics only have a chain due to the side effect
- // that they may set the SAT bit. If we know the SAT bit will not be set
- // for some inputs, we can replace any uses of their chain with the input
- // chain.
- if (BuildVectorSDNode *BVN =
- dyn_cast<BuildVectorSDNode>(N->getOperand(3))) {
- APInt APSplatBits, APSplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- bool BVNIsConstantSplat = BVN->isConstantSplat(
- APSplatBits, APSplatUndef, SplatBitSize, HasAnyUndefs, 0,
- !Subtarget.isLittleEndian());
- // If the constant splat vector is 0, the SAT bit will not be set.
- if (BVNIsConstantSplat && APSplatBits == 0)
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), N->getOperand(0));
+ switch (N->getConstantOperandVal(1)) {
+ default:
+ break;
+ case Intrinsic::ppc_altivec_vsum4sbs:
+ case Intrinsic::ppc_altivec_vsum4shs:
+ case Intrinsic::ppc_altivec_vsum4ubs: {
+ // These sum-across intrinsics only have a chain due to the side effect
+ // that they may set the SAT bit. If we know the SAT bit will not be set
+ // for some inputs, we can replace any uses of their chain with the
+ // input chain.
+ if (BuildVectorSDNode *BVN =
+ dyn_cast<BuildVectorSDNode>(N->getOperand(3))) {
+ APInt APSplatBits, APSplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ bool BVNIsConstantSplat = BVN->isConstantSplat(
+ APSplatBits, APSplatUndef, SplatBitSize, HasAnyUndefs, 0,
+ !Subtarget.isLittleEndian());
+ // If the constant splat vector is 0, the SAT bit will not be set.
+ if (BVNIsConstantSplat && APSplatBits == 0)
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), N->getOperand(0));
+ }
+ return SDValue();
}
- return SDValue();
- }
case Intrinsic::ppc_vsx_lxvw4x:
case Intrinsic::ppc_vsx_lxvd2x:
// For little endian, VSX loads require generating lxvd2x/xxswapd.
@@ -16098,7 +16091,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// For little endian, VSX stores require generating xxswapd/stxvd2x.
// Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
if (Subtarget.needsSwapsForVSXMemOps()) {
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ switch (N->getConstantOperandVal(1)) {
default:
break;
case Intrinsic::ppc_vsx_stxvw4x:
@@ -16327,7 +16320,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// Unpack the result based on how the target uses it.
PPC::Predicate CompOpc;
- switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
+ switch (LHS.getConstantOperandVal(1)) {
default: // Can't happen, don't crash on invalid number though.
case 0: // Branch on the value of the EQ bit of CR6.
CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
@@ -16406,7 +16399,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
- switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
+ switch (Op.getConstantOperandVal(0)) {
default: break;
case Intrinsic::ppc_altivec_vcmpbfp_p:
case Intrinsic::ppc_altivec_vcmpeqfp_p:
@@ -16433,7 +16426,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
break;
}
case ISD::INTRINSIC_W_CHAIN: {
- switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
+ switch (Op.getConstantOperandVal(1)) {
default:
break;
case Intrinsic::ppc_load2r:
@@ -16868,7 +16861,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
return SDValue();
SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
// Make sure the function does not optimize away the store of the RA to
// the stack.
@@ -16901,7 +16894,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -18086,8 +18079,7 @@ static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
FlagSet |= PPC::MOF_RPlusSImm34; // Signed 34-bit immediates.
else
FlagSet |= PPC::MOF_RPlusR; // Register.
- } else if (RHS.getOpcode() == PPCISD::Lo &&
- !cast<ConstantSDNode>(RHS.getOperand(1))->getZExtValue())
+ } else if (RHS.getOpcode() == PPCISD::Lo && !RHS.getConstantOperandVal(1))
FlagSet |= PPC::MOF_RPlusLo; // PPCISD::Lo.
else
FlagSet |= PPC::MOF_RPlusR;
@@ -18131,7 +18123,7 @@ unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
unsigned ParentOp = Parent->getOpcode();
if (Subtarget.isISA3_1() && ((ParentOp == ISD::INTRINSIC_W_CHAIN) ||
(ParentOp == ISD::INTRINSIC_VOID))) {
- unsigned ID = cast<ConstantSDNode>(Parent->getOperand(1))->getZExtValue();
+ unsigned ID = Parent->getConstantOperandVal(1);
if ((ID == Intrinsic::ppc_vsx_lxvp) || (ID == Intrinsic::ppc_vsx_stxvp)) {
SDValue IntrinOp = (ID == Intrinsic::ppc_vsx_lxvp)
? Parent->getOperand(2)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 098a320c91533b..bfa3bf3cc74e2b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1360,7 +1360,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
break;
- uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
+ uint64_t C2 = N0.getConstantOperandVal(1);
// Constant should be a mask.
if (!isMask_64(C2))
@@ -1604,7 +1604,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
break;
}
case ISD::INTRINSIC_W_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ unsigned IntNo = Node->getConstantOperandVal(1);
switch (IntNo) {
// By default we do not custom select any intrinsic.
default:
@@ -1825,7 +1825,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
break;
}
case ISD::INTRINSIC_VOID: {
- unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ unsigned IntNo = Node->getConstantOperandVal(1);
switch (IntNo) {
case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 51580d15451ca2..03a59f8a8b57ca 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7235,7 +7235,7 @@ SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
while (Depth--) {
int Offset = -(XLenInBytes * 2);
SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
@@ -7260,7 +7260,7 @@ SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
EVT VT = Op.getValueType();
SDLoc DL(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
if (Depth) {
int Off = -XLenInBytes;
SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
@@ -11731,7 +11731,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
break;
}
case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IntNo = N->getConstantOperandVal(0);
switch (IntNo) {
default:
llvm_unreachable(
@@ -14153,7 +14153,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
for (SDNode *U : N0->uses()) {
if (U->getOpcode() != ISD::SRA ||
!isa<ConstantSDNode>(U->getOperand(1)) ||
- cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() > 32)
+ U->getConstantOperandVal(1) > 32)
return SDValue();
}
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 4f080147921105..78bdf3ae9a84ba 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2050,7 +2050,7 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
SDValue CMPCC = LHS.getOperand(3);
- SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
+ SPCC = LHS.getConstantOperandVal(2);
LHS = CMPCC.getOperand(0);
RHS = CMPCC.getOperand(1);
}
@@ -3186,7 +3186,7 @@ static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntNo = Op.getConstantOperandVal(0);
SDLoc dl(Op);
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 559f2ca476d709..045c4c0aac07ae 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2186,7 +2186,7 @@ SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
unsigned &CCValid) {
- unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned Id = Op.getConstantOperandVal(1);
switch (Id) {
case Intrinsic::s390_tbegin:
Opcode = SystemZISD::TBEGIN;
@@ -2212,7 +2212,7 @@ static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
// CC value as its final argument. Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
- unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Id = Op.getConstantOperandVal(0);
switch (Id) {
case Intrinsic::s390_vpkshs:
case Intrinsic::s390_vpksfs:
@@ -2600,10 +2600,9 @@ static bool shouldSwapCmpOperands(const Comparison &C) {
return true;
if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
return true;
- if (C.ICmpType != SystemZICMP::SignedOnly &&
- Opcode0 == ISD::AND &&
+ if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::AND &&
C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
+ C.Op0.getConstantOperandVal(1) == 0xffffffff)
return true;
return false;
@@ -3429,11 +3428,9 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
return (Neg.getOpcode() == ISD::SUB &&
Neg.getOperand(0).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
- Neg.getOperand(1) == Pos &&
- (Pos == CmpOp ||
- (Pos.getOpcode() == ISD::SIGN_EXTEND &&
- Pos.getOperand(0) == CmpOp)));
+ Neg.getConstantOperandVal(0) == 0 && Neg.getOperand(1) == Pos &&
+ (Pos == CmpOp || (Pos.getOpcode() == ISD::SIGN_EXTEND &&
+ Pos.getOperand(0) == CmpOp)));
}
// Return the absolute or negative absolute of Op; IsNegative decides which.
@@ -3740,7 +3737,7 @@ SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
MFI.setFrameAddressIsTaken(true);
SDLoc DL(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
// By definition, the frame address is the address of the back chain. (In
@@ -3776,7 +3773,7 @@ SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
return SDValue();
SDLoc DL(Op);
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Depth = Op.getConstantOperandVal(0);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
if (Depth > 0) {
@@ -4226,7 +4223,7 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
if (HighOp.getOpcode() == ISD::AND &&
HighOp.getOperand(1).getOpcode() == ISD::Constant) {
SDValue HighOp0 = HighOp.getOperand(0);
- uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
+ uint64_t Mask = HighOp.getConstantOperandVal(1);
if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
HighOp = HighOp0;
}
@@ -4485,10 +4482,10 @@ SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
- AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
- cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
- SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
- cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+ AtomicOrdering FenceOrdering =
+ static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
+ SyncScope::ID FenceSSID =
+ static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
@@ -4773,13 +4770,13 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
SelectionDAG &DAG) const {
- bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+ bool IsData = Op.getConstantOperandVal(4);
if (!IsData)
// Just preserve the chain.
return Op.getOperand(0);
SDLoc DL(Op);
- bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ bool IsWrite = Op.getConstantOperandVal(2);
unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32),
@@ -4825,7 +4822,7 @@ SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
}
- unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Id = Op.getConstantOperandVal(0);
switch (Id) {
case Intrinsic::thread_pointer:
return lowerThreadPointer(SDLoc(Op), DAG);
@@ -5628,7 +5625,7 @@ static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
Op = Op.getOperand(0);
if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op.getOperand(1).getOpcode() == ISD::Constant) {
- unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned Elem = Op.getConstantOperandVal(1);
if (!GS.add(Op.getOperand(0), Elem))
return SDValue();
FoundOne = true;
@@ -6727,8 +6724,7 @@ SDValue SystemZTargetLowering::combineLOAD(
int Index = 1;
if (User->getOpcode() == ISD::SRL &&
User->getOperand(1).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(User->getOperand(1))->getZExtValue() == 64 &&
- User->hasOneUse()) {
+ User->getConstantOperandVal(1) == 64 && User->hasOneUse()) {
User = *User->use_begin();
Index = 0;
}
@@ -6857,7 +6853,7 @@ static bool isMovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart) {
std::swap(Op0, Op1);
if (Op1.getOpcode() != ISD::SHL || !Op1.getNode()->hasOneUse() ||
Op1.getOperand(1).getOpcode() != ISD::Constant ||
- cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue() != 64)
+ Op1.getConstantOperandVal(1) != 64)
return false;
Op1 = Op1.getOperand(0);
@@ -7149,20 +7145,18 @@ SDValue SystemZTargetLowering::combineFP_ROUND(
unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
SelectionDAG &DAG = DCI.DAG;
SDValue Op0 = N->getOperand(OpNo);
- if (N->getValueType(0) == MVT::f32 &&
- Op0.hasOneUse() &&
+ if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() &&
Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op0.getOperand(0).getValueType() == MVT::v2f64 &&
Op0.getOperand(1).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
+ Op0.getConstantOperandVal(1) == 0) {
SDValue Vec = Op0.getOperand(0);
for (auto *U : Vec->uses()) {
- if (U != Op0.getNode() &&
- U->hasOneUse() &&
+ if (U != Op0.getNode() && U->hasOneUse() &&
U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
U->getOperand(0) == Vec &&
U->getOperand(1).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
+ U->getConstantOperandVal(1) == 1) {
SDValue OtherRound = SDValue(*U->use_begin(), 0);
if (OtherRound.getOpcode() == N->getOpcode() &&
OtherRound.getOperand(OpNo) == SDValue(U, 0) &&
@@ -7215,20 +7209,18 @@ SDValue SystemZTargetLowering::combineFP_EXTEND(
unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
SelectionDAG &DAG = DCI.DAG;
SDValue Op0 = N->getOperand(OpNo);
- if (N->getValueType(0) == MVT::f64 &&
- Op0.hasOneUse() &&
+ if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() &&
Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op0.getOperand(0).getValueType() == MVT::v4f32 &&
Op0.getOperand(1).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
+ Op0.getConstantOperandVal(1) == 0) {
SDValue Vec = Op0.getOperand(0);
for (auto *U : Vec->uses()) {
- if (U != Op0.getNode() &&
- U->hasOneUse() &&
+ if (U != Op0.getNode() && U->hasOneUse() &&
U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
U->getOperand(0) == Vec &&
U->getOperand(1).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
+ U->getConstantOperandVal(1) == 2) {
SDValue OtherExtend = SDValue(*U->use_begin(), 0);
if (OtherExtend.getOpcode() == N->getOpcode() &&
OtherExtend.getOperand(OpNo) == SDValue(U, 0) &&
@@ -7605,7 +7597,7 @@ SDValue SystemZTargetLowering::combineINTRINSIC(
SDNode *N, DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
- unsigned Id = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+ unsigned Id = N->getConstantOperandVal(1);
switch (Id) {
// VECTOR LOAD (RIGHTMOST) WITH LENGTH with a length operand of 15
// or larger is simply a vector load.
@@ -7679,7 +7671,7 @@ static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
APInt SrcDemE;
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
- unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Id = Op.getConstantOperandVal(0);
switch (Id) {
case Intrinsic::s390_vpksh: // PACKS
case Intrinsic::s390_vpksf:
@@ -7723,7 +7715,7 @@ static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
SrcDemE = APInt(NumElts, 0);
if (!DemandedElts[OpNo - 1])
break;
- unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+ unsigned Mask = Op.getConstantOperandVal(3);
unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
// Demand input element 0 or 1, given by the mask bit value.
SrcDemE.setBit((Mask & MaskBit)? 1 : 0);
@@ -7732,7 +7724,7 @@ static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
case Intrinsic::s390_vsldb: {
// VECTOR SHIFT LEFT DOUBLE BY BYTE
assert(VT == MVT::v16i8 && "Unexpected type.");
- unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+ unsigned FirstIdx = Op.getConstantOperandVal(3);
assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
unsigned NumSrc0Els = 16 - FirstIdx;
SrcDemE = APInt(NumElts, 0);
@@ -7808,7 +7800,7 @@ SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
bool IsLogical = false;
- unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Id = Op.getConstantOperandVal(0);
switch (Id) {
case Intrinsic::s390_vpksh: // PACKS
case Intrinsic::s390_vpksf:
@@ -7908,7 +7900,7 @@ SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
return 1;
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
- unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned Id = Op.getConstantOperandVal(0);
switch (Id) {
case Intrinsic::s390_vpksh: // PACKS
case Intrinsic::s390_vpksf:
diff --git a/llvm/lib/Target/SystemZ/SystemZOperators.td b/llvm/lib/Target/SystemZ/SystemZOperators.td
index af6cf340f8a32c..d98bb886c18506 100644
--- a/llvm/lib/Target/SystemZ/SystemZOperators.td
+++ b/llvm/lib/Target/SystemZ/SystemZOperators.td
@@ -507,11 +507,11 @@ def z_subcarry : PatFrag<(ops node:$lhs, node:$rhs),
// Signed and unsigned comparisons.
def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, timm), [{
- unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ unsigned Type = N->getConstantOperandVal(2);
return Type != SystemZICMP::UnsignedOnly;
}]>;
def z_ucmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, timm), [{
- unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ unsigned Type = N->getConstantOperandVal(2);
return Type != SystemZICMP::SignedOnly;
}]>;
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index 0267aefd1e914e..0e41a2d7aa03eb 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -1101,10 +1101,10 @@ Instruction *VETargetLowering::emitTrailingFence(IRBuilderBase &Builder,
SDValue VETargetLowering::lowerATOMIC_FENCE(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
- AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
- cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
- SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
- cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+ AtomicOrdering FenceOrdering =
+ static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
+ SyncScope::ID FenceSSID =
+ static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
// VE uses Release consistency, so need a fence instruction if it is a
// cross-thread fence.
@@ -1766,7 +1766,7 @@ static SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
SDValue VETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntNo = Op.getConstantOperandVal(0);
switch (IntNo) {
default: // Don't custom lower most intrinsics.
return SDValue();
@@ -2937,8 +2937,8 @@ static bool isI32Insn(const SDNode *User, const SDNode *N) {
if (User->getOperand(1).getNode() != N &&
User->getOperand(2).getNode() != N &&
isa<ConstantSDNode>(User->getOperand(3))) {
- VECC::CondCode VECCVal = static_cast<VECC::CondCode>(
- cast<ConstantSDNode>(User->getOperand(3))->getZExtValue());
+ VECC::CondCode VECCVal =
+ static_cast<VECC::CondCode>(User->getConstantOperandVal(3));
return isIntVECondCode(VECCVal);
}
[[fallthrough]];
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 77a997588c4fee..846eab93e1fea4 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -5233,7 +5233,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
break;
case X86ISD::VPTERNLOG: {
- uint8_t Imm = cast<ConstantSDNode>(Node->getOperand(3))->getZExtValue();
+ uint8_t Imm = Node->getConstantOperandVal(3);
if (matchVPTERNLOG(Node, Node, Node, Node, Node->getOperand(0),
Node->getOperand(1), Node->getOperand(2), Imm))
return;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a90ddf132c3897..1e4b1361f98a6f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -31758,7 +31758,7 @@ static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+ unsigned IsData = Op.getConstantOperandVal(4);
// We don't support non-data prefetch without PREFETCHI.
// Just preserve the chain.
diff --git a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 1288597fc6b01e..05003ec304adc5 100644
--- a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -250,7 +250,7 @@ bool XCoreDAGToDAGISel::tryBRIND(SDNode *N) {
SDValue Addr = N->getOperand(1);
if (Addr->getOpcode() != ISD::INTRINSIC_W_CHAIN)
return false;
- unsigned IntNo = cast<ConstantSDNode>(Addr->getOperand(1))->getZExtValue();
+ unsigned IntNo = Addr->getConstantOperandVal(1);
if (IntNo != Intrinsic::xcore_checkevent)
return false;
SDValue nextAddr = Addr->getOperand(2);
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 7736adab19e899..46a1da26aa6b70 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -767,7 +767,7 @@ SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
// An index of zero corresponds to the current function's frame address.
// An index of one to the parent's frame address, and so on.
// Depths > 0 not supported yet!
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
+ if (Op.getConstantOperandVal(0) > 0)
return SDValue();
MachineFunction &MF = DAG.getMachineFunction();
@@ -783,7 +783,7 @@ LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
// An index of zero corresponds to the current function's return address.
// An index of one to the parent's return address, and so on.
// Depths > 0 not supported yet!
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
+ if (Op.getConstantOperandVal(0) > 0)
return SDValue();
MachineFunction &MF = DAG.getMachineFunction();
@@ -905,7 +905,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned IntNo = Op.getConstantOperandVal(0);
switch (IntNo) {
case Intrinsic::xcore_crc8:
EVT VT = Op.getValueType();
@@ -1497,7 +1497,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
switch (N->getOpcode()) {
default: break;
case ISD::INTRINSIC_VOID:
- switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
+ switch (N->getConstantOperandVal(1)) {
case Intrinsic::xcore_outt:
case Intrinsic::xcore_outct:
case Intrinsic::xcore_chkct: {
@@ -1733,29 +1733,29 @@ void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
break;
case ISD::INTRINSIC_W_CHAIN:
{
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- switch (IntNo) {
- case Intrinsic::xcore_getts:
- // High bits are known to be zero.
- Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
- Known.getBitWidth() - 16);
- break;
- case Intrinsic::xcore_int:
- case Intrinsic::xcore_inct:
- // High bits are known to be zero.
- Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
- Known.getBitWidth() - 8);
- break;
- case Intrinsic::xcore_testct:
- // Result is either 0 or 1.
- Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
- Known.getBitWidth() - 1);
- break;
- case Intrinsic::xcore_testwct:
- // Result is in the range 0 - 4.
- Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
- Known.getBitWidth() - 3);
- break;
+ unsigned IntNo = Op.getConstantOperandVal(1);
+ switch (IntNo) {
+ case Intrinsic::xcore_getts:
+ // High bits are known to be zero.
+ Known.Zero =
+ APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 16);
+ break;
+ case Intrinsic::xcore_int:
+ case Intrinsic::xcore_inct:
+ // High bits are known to be zero.
+ Known.Zero =
+ APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 8);
+ break;
+ case Intrinsic::xcore_testct:
+ // Result is either 0 or 1.
+ Known.Zero =
+ APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 1);
+ break;
+ case Intrinsic::xcore_testwct:
+ // Result is in the range 0 - 4.
+ Known.Zero =
+ APInt::getHighBitsSet(Known.getBitWidth(), Known.getBitWidth() - 3);
+ break;
}
}
break;
>From 38a59edf28da188ac63aa8ea31c4e2e083e2f46c Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Tue, 2 Jan 2024 11:27:24 +0000
Subject: [PATCH 2/2] [SelectionDAG] Add and use SDNode::getAsConstantVal()
helper
This follows on from #76708, allowing
`cast<ConstantSDNode>(N)->getZExtValue()` to be replaced with just
`N->getAsConstantVal();`
Introduced via `git grep -l "cast<ConstantSDNode>\(.*\).*getZExtValue" |
xargs sed -E -i
's/cast<ConstantSDNode>\((.*)\)->getZExtValue/\1->getAsConstantVal/'`
and then using `git clang-format` on the result.
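To make the replacement pattern concrete, here is an illustrative sketch (not part of
the patch; the wrapper function and the assumption that `N` wraps a `ConstantSDNode`
are mine):

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    #include <cassert>
    using namespace llvm;

    // N must be a ConstantSDNode (e.g. a constant index or immediate operand);
    // otherwise both spellings assert inside cast<>.
    static uint64_t readConstant(SDValue N) {
      uint64_t Old = cast<ConstantSDNode>(N)->getZExtValue(); // existing spelling
      uint64_t New = N->getAsConstantVal();                   // new helper, same value
      assert(Old == New && "the helper is just shorthand for the cast");
      return New;
    }

The helper mirrors the existing `getConstantOperandVal(i)` shorthand, but applies to
the node itself rather than to one of its operands.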
---
llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 5 +++
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 +-
.../lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 4 +--
.../SelectionDAG/LegalizeFloatTypes.cpp | 2 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 2 +-
.../SelectionDAG/LegalizeVectorTypes.cpp | 10 +++---
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 17 +++++-----
.../SelectionDAG/SelectionDAGBuilder.cpp | 34 ++++++++-----------
.../SelectionDAG/SelectionDAGDumper.cpp | 2 +-
.../CodeGen/SelectionDAG/SelectionDAGISel.cpp | 15 ++++----
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 6 ++--
.../Target/AArch64/AArch64ISelLowering.cpp | 10 +++---
.../AArch64/AArch64SelectionDAGInfo.cpp | 2 +-
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/AMDGPU/R600ISelLowering.cpp | 8 ++---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 6 ++--
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 4 +--
llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 25 +++++++-------
llvm/lib/Target/ARM/ARMISelLowering.cpp | 19 +++++------
llvm/lib/Target/AVR/AVRISelLowering.cpp | 4 +--
.../Target/Hexagon/HexagonISelDAGToDAG.cpp | 2 +-
.../Target/Hexagon/HexagonISelLoweringHVX.cpp | 6 ++--
.../LoongArch/LoongArchISelLowering.cpp | 19 +++++------
llvm/lib/Target/M68k/M68kISelLowering.cpp | 4 +--
llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp | 4 +--
llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 6 ++--
llvm/lib/Target/Mips/MipsISelLowering.cpp | 3 +-
llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 8 ++---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 2 +-
llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 14 ++++----
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 16 ++++-----
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 19 +++++------
.../Target/SystemZ/SystemZISelDAGToDAG.cpp | 8 ++---
.../Target/SystemZ/SystemZISelLowering.cpp | 27 ++++++---------
.../WebAssembly/WebAssemblyISelLowering.cpp | 7 ++--
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 2 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 24 ++++++-------
llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp | 2 +-
40 files changed, 167 insertions(+), 189 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 5c44538fe69974..d13badd83f5493 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -923,6 +923,7 @@ END_TWO_BYTE_PACK()
/// Helper method returns the integer value of a ConstantSDNode operand.
inline uint64_t getConstantOperandVal(unsigned Num) const;
+ inline uint64_t getAsConstantVal() const;
/// Helper method returns the APInt of a ConstantSDNode operand.
inline const APInt &getConstantOperandAPInt(unsigned Num) const;
@@ -1640,6 +1641,10 @@ uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
}
+uint64_t SDNode::getAsConstantVal() const {
+ return cast<ConstantSDNode>(this)->getZExtValue();
+}
+
const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index eafa95ce7fcf71..9a41531ce9fba6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14709,7 +14709,7 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
- int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ int Elt = EltNo->getAsConstantVal();
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDLoc DL(N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 34fa1f5a7ed1fb..248f26ce5cecb1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -551,7 +551,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
SDValue N2 = Node->getOperand(2);
- unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
+ unsigned SubIdx = N2->getAsConstantVal();
// Figure out the register class to create for the destreg. It should be
// the largest legal register class supporting SubIdx sub-registers.
@@ -650,7 +650,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
// Skip physical registers as they don't have a vreg to get and we'll
// insert copies for them in TwoAddressInstructionPass anyway.
if (!R || !R->getReg().isPhysical()) {
- unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
+ unsigned SubIdx = Op->getAsConstantVal();
unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
const TargetRegisterClass *SRC =
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 65919a64b80653..00522535daca1f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -2490,7 +2490,7 @@ SDValue DAGTypeLegalizer::PromoteFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
EVT VecVT = Vec->getValueType(0);
EVT EltVT = VecVT.getVectorElementType();
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
switch (getTypeAction(VecVT)) {
default: break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 3d21bd22e6ef5d..5a848fce42b689 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -5557,7 +5557,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
getTypeAction(InVT) == TargetLowering::TypeLegal) {
EVT NInVT = InVT.getHalfNumVectorElementsVT(*DAG.getContext());
unsigned NElts = NInVT.getVectorMinNumElements();
- uint64_t IdxVal = cast<ConstantSDNode>(BaseIdx)->getZExtValue();
+ uint64_t IdxVal = BaseIdx->getAsConstantVal();
SDValue Step1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NInVT, InOp0,
DAG.getConstant(alignDown(IdxVal, NElts), dl,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 66461b26468f79..475f6fba60ec9a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1442,7 +1442,7 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
Hi = DAG.getNode(
ISD::EXTRACT_SUBVECTOR, dl, HiVT, Vec,
DAG.getVectorIdxConstant(IdxVal + LoVT.getVectorMinNumElements(), dl));
@@ -1466,7 +1466,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo,
// If we know the index is in the first half, and we know the subvector
// doesn't cross the boundary between the halves, we can avoid spilling the
// vector, and insert into the lower half of the split vector directly.
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsConstantVal();
if (IdxVal + SubElems <= LoElems) {
Lo = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, LoVT, Lo, SubVec, Idx);
return;
@@ -3279,7 +3279,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_INSERT_SUBVECTOR(SDNode *N,
SDValue Lo, Hi;
GetSplitVector(SubVec, Lo, Hi);
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
uint64_t LoElts = Lo.getValueType().getVectorMinNumElements();
SDValue FirstInsertion =
@@ -3301,7 +3301,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
GetSplitVector(N->getOperand(0), Lo, Hi);
uint64_t LoEltsMin = Lo.getValueType().getVectorMinNumElements();
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
if (IdxVal < LoEltsMin) {
assert(IdxVal + SubVT.getVectorMinNumElements() <= LoEltsMin &&
@@ -5257,7 +5257,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
EVT InVT = InOp.getValueType();
// Check if we can just return the input vector after widening.
- uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ uint64_t IdxVal = Idx->getAsConstantVal();
if (IdxVal == 0 && InVT == WidenVT)
return InOp;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index eb4deb6306fd5f..91a0caad55b19e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7193,8 +7193,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
assert(isa<ConstantSDNode>(N3) &&
"Insert subvector index must be constant");
assert((VT.isScalableVector() != N2VT.isScalableVector() ||
- (N2VT.getVectorMinNumElements() +
- cast<ConstantSDNode>(N3)->getZExtValue()) <=
+ (N2VT.getVectorMinNumElements() + N3->getAsConstantVal()) <=
VT.getVectorMinNumElements()) &&
"Insert subvector overflow!");
assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() ==
@@ -9978,13 +9977,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
VTList.VTs[0].getVectorElementCount() ==
Ops[1].getValueType().getVectorElementCount()) &&
"Vector element count mismatch!");
- assert(VTList.VTs[0].isFloatingPoint() &&
- Ops[1].getValueType().isFloatingPoint() &&
- VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
- isa<ConstantSDNode>(Ops[2]) &&
- (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
- cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
- "Invalid STRICT_FP_ROUND!");
+ assert(
+ VTList.VTs[0].isFloatingPoint() &&
+ Ops[1].getValueType().isFloatingPoint() &&
+ VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
+ isa<ConstantSDNode>(Ops[2]) &&
+ (Ops[2]->getAsConstantVal() == 0 || Ops[2]->getAsConstantVal() == 1) &&
+ "Invalid STRICT_FP_ROUND!");
break;
#if 0
// FIXME: figure out how to safely handle things like
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 3c4b285cb06747..ca37b3da065d1c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5642,7 +5642,7 @@ static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
// expansion/promotion) if it was possible to expand a libcall of an
// illegal type during operation legalization. But it's not, so things
// get a bit hacky.
- unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
+ unsigned ScaleInt = Scale->getAsConstantVal();
if ((ScaleInt > 0 || (Saturating && Signed)) &&
(TLI.isTypeLegal(VT) ||
(VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
@@ -7655,8 +7655,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// suitable for the target. Convert the index as required.
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
if (Index.getValueType() != VectorIdxTy)
- Index = DAG.getVectorIdxConstant(
- cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
+ Index = DAG.getVectorIdxConstant(Index->getAsConstantVal(), sdl);
EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
@@ -7672,8 +7671,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// suitable for the target. Convert the index as required.
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
if (Index.getValueType() != VectorIdxTy)
- Index = DAG.getVectorIdxConstant(
- cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
+ Index = DAG.getVectorIdxConstant(Index->getAsConstantVal(), sdl);
setValue(&I,
DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
@@ -8136,7 +8134,7 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
case ISD::VP_IS_FPCLASS: {
const DataLayout DLayout = DAG.getDataLayout();
EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
- auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
+ auto Constant = OpValues[1]->getAsConstantVal();
SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
{OpValues[0], Check, OpValues[2], OpValues[3]});
@@ -9173,8 +9171,7 @@ findMatchingInlineAsmOperand(unsigned OperandNo,
unsigned CurOp = InlineAsm::Op_FirstOperand;
for (; OperandNo; --OperandNo) {
// Advance to the next operand.
- unsigned OpFlag =
- cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
+ unsigned OpFlag = AsmNodeOperands[CurOp]->getAsConstantVal();
const InlineAsm::Flag F(OpFlag);
assert(
(F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
@@ -9480,8 +9477,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
// just use its register.
auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
AsmNodeOperands);
- InlineAsm::Flag Flag(
- cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue());
+ InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsConstantVal());
if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
if (OpInfo.isIndirect) {
// This happens on gcc/testsuite/gcc.dg/pr8788-1.c
@@ -9985,14 +9981,14 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
// constant nodes.
SDValue ID = getValue(CI.getArgOperand(0));
assert(ID.getValueType() == MVT::i64);
- SDValue IDConst = DAG.getTargetConstant(
- cast<ConstantSDNode>(ID)->getZExtValue(), DL, ID.getValueType());
+ SDValue IDConst =
+ DAG.getTargetConstant(ID->getAsConstantVal(), DL, ID.getValueType());
Ops.push_back(IDConst);
SDValue Shad = getValue(CI.getArgOperand(1));
assert(Shad.getValueType() == MVT::i32);
- SDValue ShadConst = DAG.getTargetConstant(
- cast<ConstantSDNode>(Shad)->getZExtValue(), DL, Shad.getValueType());
+ SDValue ShadConst =
+ DAG.getTargetConstant(Shad->getAsConstantVal(), DL, Shad.getValueType());
Ops.push_back(ShadConst);
// Add the live variables.
@@ -10041,7 +10037,7 @@ void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
// Get the real number of arguments participating in the call <numArgs>
SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
- unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
+ unsigned NumArgs = NArgVal->getAsConstantVal();
// Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
// Intrinsics include all meta-operands up to but not including CC.
@@ -10088,12 +10084,10 @@ void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
// Add the <id> and <numBytes> constants.
SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
+ Ops.push_back(DAG.getTargetConstant(IDVal->getAsConstantVal(), dl, MVT::i64));
SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
- Ops.push_back(DAG.getTargetConstant(
- cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
- MVT::i32));
+ Ops.push_back(
+ DAG.getTargetConstant(NBytesVal->getAsConstantVal(), dl, MVT::i32));
// Add the callee.
Ops.push_back(Callee);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 78cc60084068a5..93fe7f49c6aee3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -149,7 +149,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
- unsigned IID = cast<ConstantSDNode>(getOperand(OpNo))->getZExtValue();
+ unsigned IID = getOperand(OpNo)->getAsConstantVal();
if (IID < Intrinsic::num_intrinsics)
return Intrinsic::getBaseName((Intrinsic::ID)IID).str();
if (!G)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index f28211ac113ca9..72b03e00fd90ae 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2121,7 +2121,7 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
--e; // Don't process a glue operand if it is here.
while (i != e) {
- InlineAsm::Flag Flags(cast<ConstantSDNode>(InOps[i])->getZExtValue());
+ InlineAsm::Flag Flags(InOps[i]->getAsConstantVal());
if (!Flags.isMemKind() && !Flags.isFuncKind()) {
// Just skip over this operand, copying the operands verbatim.
Ops.insert(Ops.end(), InOps.begin() + i,
@@ -2135,12 +2135,10 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
if (Flags.isUseOperandTiedToDef(TiedToOperand)) {
// We need the constraint ID from the operand this is tied to.
unsigned CurOp = InlineAsm::Op_FirstOperand;
- Flags =
- InlineAsm::Flag(cast<ConstantSDNode>(InOps[CurOp])->getZExtValue());
+ Flags = InlineAsm::Flag(InOps[CurOp]->getAsConstantVal());
for (; TiedToOperand; --TiedToOperand) {
CurOp += Flags.getNumOperandRegisters() + 1;
- Flags = InlineAsm::Flag(
- cast<ConstantSDNode>(InOps[CurOp])->getZExtValue());
+ Flags = InlineAsm::Flag(InOps[CurOp]->getAsConstantVal());
}
}
@@ -2380,9 +2378,8 @@ void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
if (OpNode->getOpcode() == ISD::Constant) {
Ops.push_back(
CurDAG->getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
- Ops.push_back(
- CurDAG->getTargetConstant(cast<ConstantSDNode>(OpNode)->getZExtValue(),
- DL, OpVal.getValueType()));
+ Ops.push_back(CurDAG->getTargetConstant(OpNode->getAsConstantVal(), DL,
+ OpVal.getValueType()));
} else {
Ops.push_back(OpVal);
}
@@ -2452,7 +2449,7 @@ void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
Ops.push_back(*It++);
// Push the args for the call.
- for (uint64_t I = cast<ConstantSDNode>(NumArgs)->getZExtValue(); I != 0; I--)
+ for (uint64_t I = NumArgs->getAsConstantVal(); I != 0; I--)
Ops.push_back(*It++);
// Now push the live variables.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 476d99c2a7e045..e28258b7025316 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -580,7 +580,7 @@ bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
if (!isa<ConstantSDNode>(N.getNode()))
return false;
- uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
+ uint64_t Immed = N.getNode()->getAsConstantVal();
unsigned ShiftAmt;
if (Immed >> 12 == 0) {
@@ -611,7 +611,7 @@ bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
return false;
// The immediate operand must be a 24-bit zero-extended immediate.
- uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
+ uint64_t Immed = N.getNode()->getAsConstantVal();
// This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
// have the opposite effect on the C flag, so this pattern mustn't match under
@@ -1326,7 +1326,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
// MOV X0, WideImmediate
// LDR X2, [BaseReg, X0]
if (isa<ConstantSDNode>(RHS)) {
- int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
+ int64_t ImmOff = (int64_t)RHS->getAsConstantVal();
// Skip the immediate can be selected by load/store addressing mode.
// Also skip the immediate can be encoded by a single ADD (SUB is also
// checked by using -ImmOff).
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c000f33e2c5415..1269efbb840b04 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3589,7 +3589,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// can be turned into:
// cmp w12, w11, lsl #1
if (!isa<ConstantSDNode>(RHS) ||
- !isLegalArithImmed(cast<ConstantSDNode>(RHS)->getZExtValue())) {
+ !isLegalArithImmed(RHS->getAsConstantVal())) {
SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
@@ -3623,7 +3623,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
LHS.getNode()->hasNUsesOfValue(1, 0)) {
- int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
+ int16_t ValueofRHS = RHS->getAsConstantVal();
if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
SDValue SExt =
DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
@@ -5619,7 +5619,7 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
// SVE supports an index scaled by sizeof(MemVT.elt) only, everything else
// must be calculated before hand.
- uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleVal = Scale->getAsConstantVal();
if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
EVT IndexVT = Index.getValueType();
@@ -5707,7 +5707,7 @@ SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
// SVE supports an index scaled by sizeof(MemVT.elt) only, everything else
// must be calculated before hand.
- uint64_t ScaleVal = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleVal = Scale->getAsConstantVal();
if (IsScaled && ScaleVal != MemVT.getScalarStoreSize()) {
assert(isPowerOf2_64(ScaleVal) && "Expecting power-of-two types");
EVT IndexVT = Index.getValueType();
@@ -22012,7 +22012,7 @@ static SDValue performBRCONDCombine(SDNode *N,
SDValue Cmp = N->getOperand(3);
assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
- unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
+ unsigned CC = CCVal->getAsConstantVal();
if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
return SDValue();
diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index 1a76f354589eee..3415b533047d1a 100644
--- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -172,7 +172,7 @@ static SDValue EmitUnrolledSetTag(SelectionDAG &DAG, const SDLoc &dl,
SDValue AArch64SelectionDAGInfo::EmitTargetCodeForSetTag(
SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr,
SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const {
- uint64_t ObjSize = cast<ConstantSDNode>(Size)->getZExtValue();
+ uint64_t ObjSize = Size->getAsConstantVal();
assert(ObjSize % 16 == 0);
MachineFunction &MF = DAG.getMachineFunction();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 28604af2cdb3ce..3a78024dd79c2d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -382,7 +382,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
Subtarget->getRegisterInfo()->getRegClass(RCID);
SDValue SubRegOp = N->getOperand(OpNo + 1);
- unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
+ unsigned SubRegIdx = SubRegOp->getAsConstantVal();
return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
SubRegIdx);
}
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 9a2fb0bc37b2c0..6b43e47dc8a378 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1651,7 +1651,7 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ unsigned Idx = Swz[i]->getAsConstantVal();
if (SwizzleRemap.contains(Idx))
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
@@ -1659,7 +1659,7 @@ SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
SwizzleRemap.clear();
BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
for (unsigned i = 0; i < 4; i++) {
- unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ unsigned Idx = Swz[i]->getAsConstantVal();
if (SwizzleRemap.contains(Idx))
Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
}
@@ -1780,7 +1780,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
// Check that we know which element is being inserted
if (!isa<ConstantSDNode>(EltNo))
return SDValue();
- unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ unsigned Elt = EltNo->getAsConstantVal();
// Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
@@ -2021,7 +2021,7 @@ bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx,
}
case R600::MOV_IMM_GLOBAL_ADDR:
// Check if the Imm slot is used. Taken from below.
- if (cast<ConstantSDNode>(Imm)->getZExtValue())
+ if (Imm->getAsConstantVal())
return false;
Imm = Src.getOperand(0);
Src = DAG.getRegister(R600::ALU_LITERAL_X, MVT::i32);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 2cb08e025dd901..00effad1d84c34 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6423,7 +6423,7 @@ SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
EVT InsVT = Ins.getValueType();
EVT EltVT = VecVT.getVectorElementType();
unsigned InsNumElts = InsVT.getVectorNumElements();
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsConstantVal();
SDLoc SL(Op);
if (EltVT.getScalarSizeInBits() == 16 && IdxVal % 2 == 0) {
@@ -7452,7 +7452,7 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
Ops.push_back(IsA16 ? True : False);
if (!Subtarget->hasGFX90AInsts()) {
Ops.push_back(TFE); //tfe
- } else if (cast<ConstantSDNode>(TFE)->getZExtValue()) {
+ } else if (TFE->getAsConstantVal()) {
report_fatal_error("TFE is not supported on this GPU");
}
if (!IsGFX12Plus || BaseOpcode->Sampler || BaseOpcode->MSAA)
@@ -7589,7 +7589,7 @@ SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
setBufferOffsets(Offset, DAG, &Ops[3],
NumLoads > 1 ? Align(16 * NumLoads) : Align(4));
- uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
+ uint64_t InstOffset = Ops[5]->getAsConstantVal();
for (unsigned i = 0; i < NumLoads; ++i) {
Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index aae6f2e842fd1a..4a8b80c2852bf7 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -338,8 +338,8 @@ bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
return false;
- Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
- Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
+ Offset0 = Off0->getAsConstantVal();
+ Offset1 = Off1->getAsConstantVal();
return true;
}
diff --git a/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp b/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
index 28e35f8f2a548a..aec01c002ea291 100644
--- a/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARC/ARCISelDAGToDAG.cpp
@@ -170,7 +170,7 @@ bool ARCDAGToDAGISel::SelectFrameADDR_ri(SDValue Addr, SDValue &Base,
void ARCDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
case ISD::Constant: {
- uint64_t CVal = cast<ConstantSDNode>(N)->getZExtValue();
+ uint64_t CVal = N->getAsConstantVal();
ReplaceNode(N, CurDAG->getMachineNode(
isInt<12>(CVal) ? ARC::MOV_rs12 : ARC::MOV_rlimm,
SDLoc(N), MVT::i32,
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index adc429b61bbcce..29dad62d197034 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -372,7 +372,7 @@ INITIALIZE_PASS(ARMDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsConstantVal();
return true;
}
return false;
@@ -1101,8 +1101,8 @@ bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
Offset = N.getOperand(0);
SDValue N1 = N.getOperand(1);
- Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
- SDLoc(N), MVT::i32);
+ Label =
+ CurDAG->getTargetConstant(N1->getAsConstantVal(), SDLoc(N), MVT::i32);
return true;
}
@@ -1942,7 +1942,7 @@ SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
if (!is64BitVector && NumVecs < 3)
NumRegs *= 2;
- unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ unsigned Alignment = Align->getAsConstantVal();
if (Alignment >= 32 && NumRegs == 4)
Alignment = 32;
else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
@@ -2428,7 +2428,7 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
unsigned Alignment = 0;
if (NumVecs != 3) {
- Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ Alignment = Align->getAsConstantVal();
unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
@@ -2871,7 +2871,7 @@ void ARMDAGToDAGISel::SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
Ops.push_back(N->getOperand(OpIdx++)); // limit
SDValue ImmOp = N->getOperand(OpIdx++); // step
- int ImmValue = cast<ConstantSDNode>(ImmOp)->getZExtValue();
+ int ImmValue = ImmOp->getAsConstantVal();
Ops.push_back(getI32Imm(ImmValue, Loc));
if (Predicated)
@@ -2892,7 +2892,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
// Convert and append the immediate operand designating the coprocessor.
SDValue ImmCorpoc = N->getOperand(OpIdx++);
- uint32_t ImmCoprocVal = cast<ConstantSDNode>(ImmCorpoc)->getZExtValue();
+ uint32_t ImmCoprocVal = ImmCorpoc->getAsConstantVal();
Ops.push_back(getI32Imm(ImmCoprocVal, Loc));
// For accumulating variants copy the low and high order parts of the
@@ -2911,7 +2911,7 @@ void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
// Convert and append the immediate operand
SDValue Imm = N->getOperand(OpIdx);
- uint32_t ImmVal = cast<ConstantSDNode>(Imm)->getZExtValue();
+ uint32_t ImmVal = Imm->getAsConstantVal();
Ops.push_back(getI32Imm(ImmVal, Loc));
// Accumulating variants are IT-predicable, add predicate operands.
@@ -2965,7 +2965,7 @@ void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
unsigned Alignment = 0;
if (NumVecs != 3) {
- Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
+ Alignment = Align->getAsConstantVal();
unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
@@ -3697,7 +3697,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
// Other cases are autogenerated.
break;
case ISD::Constant: {
- unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
+ unsigned Val = N->getAsConstantVal();
// If we can't materialize the constant we need to use a literal pool
if (ConstantMaterializationCost(Val, Subtarget) > 2 &&
!Subtarget->genExecuteOnly()) {
@@ -4132,7 +4132,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
assert(N2.getOpcode() == ISD::Constant);
assert(N3.getOpcode() == ISD::Register);
- unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
+ unsigned CC = (unsigned)N2->getAsConstantVal();
if (InGlue.getOpcode() == ARMISD::CMPZ) {
if (InGlue.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
@@ -4243,8 +4243,7 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
if (SwitchEQNEToPLMI) {
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
switch (CC) {
default: llvm_unreachable("CMPZ must be either NE or EQ!");
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index cf9646a0b81ed0..cce000eeef15c3 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -4820,8 +4820,7 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// some tweaks to the heuristics for the previous and->shift transform.
// FIXME: Optimize cases where the LHS isn't a shift.
if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
+ isa<ConstantSDNode>(RHS) && RHS->getAsConstantVal() == 0x80000000U &&
CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
LHS.getConstantOperandVal(1) < 31) {
unsigned ShiftAmt = LHS.getConstantOperandVal(1) + 1;
@@ -5533,7 +5532,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
// Choose GE over PL, which vsel does now support
- if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
+ if (ARMcc->getAsConstantVal() == ARMCC::PL)
ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
}
@@ -7749,7 +7748,7 @@ static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
uint64_t Val;
if (!isa<ConstantSDNode>(N))
return SDValue();
- Val = cast<ConstantSDNode>(N)->getZExtValue();
+ Val = N->getAsConstantVal();
if (ST->isThumb1Only()) {
if (Val <= 255 || ~Val <= 255)
@@ -7804,7 +7803,7 @@ static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
SDValue V = Op.getOperand(i);
if (!isa<ConstantSDNode>(V) && !V.isUndef())
continue;
- bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
+ bool BitSet = V.isUndef() ? false : V->getAsConstantVal();
if (BitSet)
Bits32 |= BoolMask << (i * BitsPerBool);
}
@@ -9240,7 +9239,7 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
EVT VT = Op.getValueType();
EVT Op1VT = V1.getValueType();
unsigned NumElts = VT.getVectorNumElements();
- unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue();
+ unsigned Index = V2->getAsConstantVal();
assert(VT.getScalarSizeInBits() == 1 &&
"Unexpected custom EXTRACT_SUBVECTOR lowering");
@@ -14618,7 +14617,7 @@ static SDValue PerformORCombineToBFI(SDNode *N,
// Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
// where lsb(mask) == #shamt and masked bits of B are known zero.
SDValue ShAmt = N00.getOperand(1);
- unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
+ unsigned ShAmtC = ShAmt->getAsConstantVal();
unsigned LSB = llvm::countr_zero(Mask);
if (ShAmtC != LSB)
return SDValue();
@@ -18339,8 +18338,7 @@ ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
SDValue Chain = N->getOperand(0);
SDValue BB = N->getOperand(1);
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
// (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
// -> (brcond Chain BB CC CPSR Cmp)
@@ -18373,8 +18371,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
SDValue FalseVal = N->getOperand(0);
SDValue TrueVal = N->getOperand(1);
SDValue ARMcc = N->getOperand(2);
- ARMCC::CondCodes CC =
- (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsConstantVal();
// BFI is only available on V6T2+.
if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index d36bfb188ed360..be0cec36eebdf3 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -660,7 +660,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
SDValue Cmp;
if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
- uint64_t Imm = cast<ConstantSDNode>(RHS)->getZExtValue();
+ uint64_t Imm = RHS->getAsConstantVal();
// Generate a CPI/CPC pair if RHS is a 16-bit constant. Use the zero
// register for the constant RHS if its lower or higher byte is zero.
SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
@@ -680,7 +680,7 @@ SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
} else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
// Generate a CPI/CPC pair if LHS is a 16-bit constant. Use the zero
// register for the constant LHS if its lower or higher byte is zero.
- uint64_t Imm = cast<ConstantSDNode>(LHS)->getZExtValue();
+ uint64_t Imm = LHS->getAsConstantVal();
SDValue LHSlo = (Imm & 0xff) == 0
? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
: DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index eb5c59672224e9..eeb18056709a34 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -743,7 +743,7 @@ void HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
//
void HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
if (N->getValueType(0) == MVT::i1) {
- assert(!(cast<ConstantSDNode>(N)->getZExtValue() >> 1));
+ assert(!(N->getAsConstantVal() >> 1));
unsigned Opc = (cast<ConstantSDNode>(N)->getSExtValue() != 0)
? Hexagon::PS_true
: Hexagon::PS_false;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index 665e2d79c83d10..d84c0939a66bfc 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -1256,7 +1256,7 @@ HexagonTargetLowering::extractHvxSubvectorReg(SDValue OrigOp, SDValue VecV,
SDValue IdxV, const SDLoc &dl, MVT ResTy, SelectionDAG &DAG) const {
MVT VecTy = ty(VecV);
unsigned HwLen = Subtarget.getVectorLength();
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsConstantVal();
MVT ElemTy = VecTy.getVectorElementType();
unsigned ElemWidth = ElemTy.getSizeInBits();
@@ -1299,7 +1299,7 @@ HexagonTargetLowering::extractHvxSubvectorPred(SDValue VecV, SDValue IdxV,
MVT ByteTy = MVT::getVectorVT(MVT::i8, HwLen);
SDValue ByteVec = DAG.getNode(HexagonISD::Q2V, dl, ByteTy, VecV);
// IdxV is required to be a constant.
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsConstantVal();
unsigned ResLen = ResTy.getVectorNumElements();
unsigned BitBytes = HwLen / VecTy.getVectorNumElements();
@@ -1801,7 +1801,7 @@ HexagonTargetLowering::LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG)
MVT SrcTy = ty(SrcV);
MVT DstTy = ty(Op);
SDValue IdxV = Op.getOperand(1);
- unsigned Idx = cast<ConstantSDNode>(IdxV.getNode())->getZExtValue();
+ unsigned Idx = IdxV.getNode()->getAsConstantVal();
assert(Idx % DstTy.getVectorNumElements() == 0);
(void)Idx;
const SDLoc &dl(Op);
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index e14bbadf9ed227..a776511fb9f493 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -525,8 +525,7 @@ LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
if (isa<ConstantSDNode>(Idx) &&
(EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
- EltTy == MVT::f64 ||
- cast<ConstantSDNode>(Idx)->getZExtValue() < NumElts / 2))
+ EltTy == MVT::f64 || Idx->getAsConstantVal() < NumElts / 2))
return Op;
return SDValue();
@@ -1383,28 +1382,28 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
return emitIntrinsicErrorMessage(Op, ErrorMsgReqLA32, DAG);
// call void @llvm.loongarch.cacop.[d/w](uimm5, rj, simm12)
- unsigned Imm1 = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm1 = Op2->getAsConstantVal();
int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
return emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG);
return Op;
}
case Intrinsic::loongarch_dbar: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::DBAR, DL, MVT::Other, Chain,
DAG.getConstant(Imm, DL, GRLenVT));
}
case Intrinsic::loongarch_ibar: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::IBAR, DL, MVT::Other, Chain,
DAG.getConstant(Imm, DL, GRLenVT));
}
case Intrinsic::loongarch_break: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::BREAK, DL, MVT::Other, Chain,
@@ -1413,7 +1412,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
case Intrinsic::loongarch_movgr2fcsr: {
if (!Subtarget.hasBasicF())
return emitIntrinsicErrorMessage(Op, ErrorMsgReqF, DAG);
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<2>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::MOVGR2FCSR, DL, MVT::Other, Chain,
@@ -1422,7 +1421,7 @@ SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
Op.getOperand(3)));
}
case Intrinsic::loongarch_syscall: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
return !isUInt<15>(Imm)
? emitIntrinsicErrorMessage(Op, ErrorMsgOOR, DAG)
: DAG.getNode(LoongArchISD::SYSCALL, DL, MVT::Other, Chain,
@@ -1925,7 +1924,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqF);
return;
}
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
if (!isUInt<2>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
@@ -1981,7 +1980,7 @@ void LoongArchTargetLowering::ReplaceNodeResults(
CSR_CASE(iocsrrd_d);
#undef CSR_CASE
case Intrinsic::loongarch_csrrd_w: {
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
if (!isUInt<14>(Imm)) {
emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgOOR);
return;
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index c4d7a0dec7f390..b26ed5c4749de6 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -2375,7 +2375,7 @@ SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == M68kISD::SUB) {
- unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
+ unsigned CondCode = CC->getAsConstantVal();
if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
@@ -2491,7 +2491,7 @@ SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
Cond = Cmp;
AddTest = false;
} else {
- switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
+ switch (CC->getAsConstantVal()) {
default:
break;
case M68k::COND_VS:
diff --git a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 660861a5d52182..b4cee26cc09b33 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -308,12 +308,12 @@ static bool isValidIndexedLoad(const LoadSDNode *LD) {
switch (VT.getSimpleVT().SimpleTy) {
case MVT::i8:
- if (cast<ConstantSDNode>(LD->getOffset())->getZExtValue() != 1)
+ if (LD->getOffset()->getAsConstantVal() != 1)
return false;
break;
case MVT::i16:
- if (cast<ConstantSDNode>(LD->getOffset())->getZExtValue() != 2)
+ if (LD->getOffset()->getAsConstantVal() != 2)
return false;
break;
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index d3b59138a5a95e..3174d613cc38bf 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -1168,8 +1168,8 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
bool Invert = false;
bool Shift = false;
bool Convert = true;
- switch (cast<ConstantSDNode>(TargetCC)->getZExtValue()) {
- default:
+ switch (TargetCC->getAsConstantVal()) {
+ default:
Convert = false;
break;
case MSP430CC::COND_HS:
@@ -1193,7 +1193,7 @@ SDValue MSP430TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
// C = ~Z for AND instruction, thus we can put Res = ~(SR & 1), however,
// Res = (SR >> 1) & 1 is 1 word shorter.
break;
- }
+ }
EVT VT = Op.getValueType();
SDValue One = DAG.getConstant(1, dl, VT);
if (Convert) {
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 483eba4e4f4794..301eb3169ef2f9 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2042,8 +2042,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
return Op;
SDValue CCNode = CondRes.getOperand(2);
- Mips::CondCode CC =
- (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
+ Mips::CondCode CC = (Mips::CondCode)CCNode->getAsConstantVal();
unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 0ed87ee0809a3e..7b386b024803a8 100644
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -76,7 +76,7 @@ void MipsSEDAGToDAGISel::addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
}
unsigned MipsSEDAGToDAGISel::getMSACtrlReg(const SDValue RegIdx) const {
- uint64_t RegNum = cast<ConstantSDNode>(RegIdx)->getZExtValue();
+ uint64_t RegNum = RegIdx->getAsConstantVal();
return Mips::MSACtrlRegClass.getRegister(RegNum);
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 815c46edb6fa2a..2c9966696ed156 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -2076,7 +2076,7 @@ bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) {
VTs = CurDAG->getVTList(EVTs);
}
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsConstantVal();
SmallVector<SDValue, 2> Ops;
Ops.push_back(CurDAG->getTargetConstant(OffsetVal, DL, MVT::i32));
@@ -2091,7 +2091,7 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Offset = N->getOperand(1);
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsConstantVal();
MemSDNode *Mem = cast<MemSDNode>(N);
// How many elements do we have?
@@ -2158,9 +2158,9 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Param = N->getOperand(1);
- unsigned ParamVal = cast<ConstantSDNode>(Param)->getZExtValue();
+ unsigned ParamVal = Param->getAsConstantVal();
SDValue Offset = N->getOperand(2);
- unsigned OffsetVal = cast<ConstantSDNode>(Offset)->getZExtValue();
+ unsigned OffsetVal = Offset->getAsConstantVal();
MemSDNode *Mem = cast<MemSDNode>(N);
SDValue Glue = N->getOperand(N->getNumOperands() - 1);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index e8f36bf50a1b08..693e75bdd6ff1c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5811,7 +5811,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SDLoc DL(N);
// Get the intrinsic ID
- unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
+ unsigned IntrinNo = Intrin.getNode()->getAsConstantVal();
switch (IntrinNo) {
default:
return;
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index ed96339240d926..35b0f0a706480d 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -565,7 +565,7 @@ static bool hasTocDataAttr(SDValue Val, unsigned PointerSize) {
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsConstantVal();
return true;
}
return false;
@@ -575,7 +575,7 @@ static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
/// operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = N->getAsConstantVal();
return true;
}
return false;
@@ -1500,7 +1500,7 @@ static SDNode *selectI64Imm(SelectionDAG *CurDAG, SDNode *N) {
SDLoc dl(N);
// Get 64 bit value.
- int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
+ int64_t Imm = N->getAsConstantVal();
if (unsigned MinSize = allUsesTruncate(CurDAG, N)) {
uint64_t SextImm = SignExtend64(Imm, MinSize);
SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
@@ -4923,7 +4923,7 @@ bool PPCDAGToDAGISel::trySelectLoopCountIntrinsic(SDNode *N) {
SDNode *NewDecrement = CurDAG->getMachineNode(DecrementOpcode, DecrementLoc,
MVT::i1, DecrementOps);
- unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
+ unsigned Val = RHS->getAsConstantVal();
bool IsBranchOnTrue = (CC == ISD::SETEQ && Val) || (CC == ISD::SETNE && !Val);
unsigned Opcode = IsBranchOnTrue ? PPC::BC : PPC::BCn;
@@ -5765,7 +5765,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
break;
// If the multiplier fits int16, we can handle it with mulli.
- int64_t Imm = cast<ConstantSDNode>(Op1)->getZExtValue();
+ int64_t Imm = Op1->getAsConstantVal();
unsigned Shift = llvm::countr_zero<uint64_t>(Imm);
if (isInt<16>(Imm) || !Shift)
break;
@@ -6612,8 +6612,8 @@ void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) {
// For us to materialize these using one instruction, we must be able to
// represent them as signed 16-bit integers.
- uint64_t True = cast<ConstantSDNode>(TrueRes)->getZExtValue(),
- False = cast<ConstantSDNode>(FalseRes)->getZExtValue();
+ uint64_t True = TrueRes->getAsConstantVal(),
+ False = FalseRes->getAsConstantVal();
if (!isInt<16>(True) || !isInt<16>(False))
break;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 8f27e6677afa5e..5c865dae018e17 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2566,7 +2566,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
if (LeadingZero) {
if (!UniquedVals[Multiple-1].getNode())
return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
- int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
+ int Val = UniquedVals[Multiple - 1]->getAsConstantVal();
if (Val < 16) // 0,0,0,4 -> vspltisw(4)
return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
}
@@ -2635,11 +2635,11 @@ bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = (int16_t)N->getAsConstantVal();
if (N->getValueType(0) == MVT::i32)
- return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
+ return Imm == (int32_t)N->getAsConstantVal();
else
- return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
+ return Imm == (int64_t)N->getAsConstantVal();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
return isIntS16Immediate(Op.getNode(), Imm);
@@ -2684,7 +2684,7 @@ bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
if (!isa<ConstantSDNode>(N))
return false;
- Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
+ Imm = (int64_t)N->getAsConstantVal();
return isInt<34>(Imm);
}
bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
@@ -15580,7 +15580,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
NarrowOp.getOpcode() != ISD::ROTL && NarrowOp.getOpcode() != ISD::ROTR)
break;
- uint64_t Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ uint64_t Imm = Op2->getAsConstantVal();
// Make sure that the constant is narrow enough to fit in the narrow type.
if (!isUInt<32>(Imm))
break;
@@ -16795,7 +16795,7 @@ void PPCTargetLowering::CollectTargetIntrinsicOperands(const CallInst &I,
return;
if (!isa<ConstantSDNode>(Ops[1].getNode()))
return;
- auto IntrinsicID = cast<ConstantSDNode>(Ops[1].getNode())->getZExtValue();
+ auto IntrinsicID = Ops[1].getNode()->getAsConstantVal();
if (IntrinsicID != Intrinsic::ppc_tdw && IntrinsicID != Intrinsic::ppc_tw &&
IntrinsicID != Intrinsic::ppc_trapd && IntrinsicID != Intrinsic::ppc_trap)
return;
@@ -18430,7 +18430,7 @@ PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
if (Flags & PPC::MOF_RPlusSImm16) {
SDValue Op0 = N.getOperand(0);
SDValue Op1 = N.getOperand(1);
- int16_t Imm = cast<ConstantSDNode>(Op1)->getZExtValue();
+ int16_t Imm = Op1->getAsConstantVal();
if (!Align || isAligned(*Align, Imm)) {
Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
Base = Op0;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 03a59f8a8b57ca..76bcdf98e89b55 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3489,7 +3489,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (unsigned I = 0; I < NumElts;) {
SDValue V = Op.getOperand(I);
- bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
+ bool BitValue = !V.isUndef() && V->getAsConstantVal();
Bits |= ((uint64_t)BitValue << BitPos);
++BitPos;
++I;
@@ -3620,7 +3620,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
for (const auto &OpIdx : enumerate(Op->op_values())) {
const auto &SeqV = OpIdx.value();
if (!SeqV.isUndef())
- SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
+ SplatValue |= ((SeqV->getAsConstantVal() & EltMask)
<< (OpIdx.index() * EltBitSize));
}
@@ -3676,8 +3676,8 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
// vector type.
for (const auto &SeqV : Sequence) {
if (!SeqV.isUndef())
- SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
- << (EltIdx * EltBitSize));
+ SplatValue |=
+ ((SeqV->getAsConstantVal() & EltMask) << (EltIdx * EltBitSize));
EltIdx++;
}
@@ -3938,8 +3938,7 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
(isa<RegisterSDNode>(VL) &&
cast<RegisterSDNode>(VL)->getReg() == RISCV::X0))
NewVL = DAG.getRegister(RISCV::X0, MVT::i32);
- else if (isa<ConstantSDNode>(VL) &&
- isUInt<4>(cast<ConstantSDNode>(VL)->getZExtValue()))
+ else if (isa<ConstantSDNode>(VL) && isUInt<4>(VL->getAsConstantVal()))
NewVL = DAG.getNode(ISD::ADD, DL, VL.getValueType(), VL, VL);
if (NewVL) {
@@ -7906,8 +7905,7 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
// Use tail agnostic policy if Idx is the last index of Vec.
unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) &&
- cast<ConstantSDNode>(Idx)->getZExtValue() + 1 ==
- VecVT.getVectorNumElements())
+ Idx->getAsConstantVal() + 1 == VecVT.getVectorNumElements())
Policy = RISCVII::TAIL_AGNOSTIC;
SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
Idx, Mask, InsertVL, Policy);
@@ -8167,7 +8165,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
const auto [MinVLMAX, MaxVLMAX] =
RISCVTargetLowering::computeVLMAXBounds(VT, Subtarget);
- uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
+ uint64_t AVLInt = AVL->getAsConstantVal();
if (AVLInt <= MinVLMAX) {
I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
} else if (AVLInt >= 2 * MaxVLMAX) {
@@ -8233,8 +8231,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
SDValue Mask = Operands[NumOps - 3];
SDValue MaskedOff = Operands[1];
// Assume Policy operand is the last operand.
- uint64_t Policy =
- cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
+ uint64_t Policy = Operands[NumOps - 1]->getAsConstantVal();
// We don't need to select maskedoff if it's undef.
if (MaskedOff.isUndef())
return Vec;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index c7d8591c5bdf6f..c0767f6ae4cd78 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -1641,7 +1641,7 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
// If this is a 64-bit constant that is out of the range of LLILF,
// LLIHF and LGFI, split it into two 32-bit pieces.
if (Node->getValueType(0) == MVT::i64) {
- uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
+ uint64_t Val = Node->getAsConstantVal();
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
uint32_t(Val));
@@ -1677,10 +1677,8 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {
isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
SDValue CCValid = Node->getOperand(2);
SDValue CCMask = Node->getOperand(3);
- uint64_t ConstCCValid =
- cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
- uint64_t ConstCCMask =
- cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
+ uint64_t ConstCCValid = CCValid.getNode()->getAsConstantVal();
+ uint64_t ConstCCMask = CCMask.getNode()->getAsConstantVal();
// Invert the condition.
CCMask = CurDAG->getTargetConstant(ConstCCValid ^ ConstCCMask,
SDLoc(Node), CCMask.getValueType());
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 045c4c0aac07ae..744951933ed879 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2662,10 +2662,8 @@ static void adjustForFNeg(Comparison &C) {
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
// Check for a comparison between (shl X, 32) and 0.
- if (C.Op0.getOpcode() == ISD::SHL &&
- C.Op0.getValueType() == MVT::i64 &&
- C.Op1.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
+ C.Op1.getOpcode() == ISD::Constant && C.Op1->getAsConstantVal() == 0) {
auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
if (C1 && C1->getZExtValue() == 32) {
SDValue ShlOp0 = C.Op0.getOperand(0);
@@ -2690,7 +2688,7 @@ static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ C.Op1->getAsConstantVal() == 0) {
auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
C.Op0.getValueSizeInBits().getFixedValue()) {
@@ -3035,12 +3033,12 @@ static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid,
- cast<ConstantSDNode>(CmpOp1)->getZExtValue(), Cond);
+ CmpOp1->getAsConstantVal(), Cond);
if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid,
- cast<ConstantSDNode>(CmpOp1)->getZExtValue(), Cond);
+ CmpOp1->getAsConstantVal(), Cond);
}
Comparison C(CmpOp0, CmpOp1, Chain);
C.CCMask = CCMaskForCondCode(Cond);
@@ -3457,12 +3455,11 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
// Check for absolute and negative-absolute selections, including those
// where the comparison value is sign-extended (for LPGFR and LNGFR).
// This check supplements the one in DAGCombiner.
- if (C.Opcode == SystemZISD::ICMP &&
- C.CCMask != SystemZ::CCMASK_CMP_EQ &&
+ if (C.Opcode == SystemZISD::ICMP && C.CCMask != SystemZ::CCMASK_CMP_EQ &&
C.CCMask != SystemZ::CCMASK_CMP_NE &&
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
- cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ C.Op1->getAsConstantVal() == 0) {
if (isAbsolute(C.Op0, TrueOp, FalseOp))
return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
if (isAbsolute(C.Op0, FalseOp, TrueOp))
@@ -3947,8 +3944,7 @@ SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
// If user has set the no alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal =
- (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
+ uint64_t AlignVal = (RealignOpt ? Align->getAsConstantVal() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
@@ -4013,8 +4009,7 @@ SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
// If user has set the no alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal =
- (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
+ uint64_t AlignVal = (RealignOpt ? Align->getAsConstantVal() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
@@ -4213,7 +4208,7 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// If the low part is a constant that is outside the range of LHI,
// then we're better off using IILF.
if (LowOp.getOpcode() == ISD::Constant) {
- int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
+ int64_t Value = int32_t(LowOp->getAsConstantVal());
if (!isInt<16>(Value))
return Op;
}
@@ -5897,7 +5892,7 @@ SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
Op1.getOpcode() != ISD::BITCAST &&
Op1.getOpcode() != ISD::ConstantFP &&
Op2.getOpcode() == ISD::Constant) {
- uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
+ uint64_t Index = Op2->getAsConstantVal();
unsigned Mask = VT.getVectorNumElements() - 1;
if (Index <= Mask)
return Op;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 4bcf89690505ed..71c5430f1f9efa 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1869,8 +1869,7 @@ SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
Ops[OpIdx++] = Op.getOperand(2);
while (OpIdx < 18) {
const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
- if (MaskIdx.isUndef() ||
- cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
+ if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsConstantVal() >= 32) {
bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
} else {
@@ -1912,7 +1911,7 @@ WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
const SDNode *Index = Extract.getOperand(1).getNode();
if (!isa<ConstantSDNode>(Index))
return SDValue();
- unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
+ unsigned IndexVal = Index->getAsConstantVal();
unsigned Scale =
ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
assert(Scale > 1);
@@ -2335,7 +2334,7 @@ WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
if (isa<ConstantSDNode>(IdxNode)) {
// Ensure the index type is i32 to match the tablegen patterns
- uint64_t Idx = cast<ConstantSDNode>(IdxNode)->getZExtValue();
+ uint64_t Idx = IdxNode->getAsConstantVal();
SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
Ops[Op.getNumOperands() - 1] =
DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 846eab93e1fea4..5e8eb37af8bfab 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2852,7 +2852,7 @@ bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
SDValue &Index, SDValue &Disp,
SDValue &Segment) {
X86ISelAddressMode AM;
- AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
+ AM.Scale = ScaleOp->getAsConstantVal();
// Attempt to match index patterns, as long as we're not relying on implicit
// sign-extension, which is performed BEFORE scale.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1e4b1361f98a6f..12afc563f80464 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7371,7 +7371,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
/// index.
static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
SDValue ExtIdx) {
- int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
+ int Idx = ExtIdx->getAsConstantVal();
if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
return Idx;
@@ -8793,7 +8793,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
- unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
+ unsigned InsertC = InsIndex->getAsConstantVal();
unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
if (InsertC < NumEltsInLow128Bits)
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
@@ -17747,7 +17747,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Vec), Idx));
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+ unsigned IdxVal = Idx->getAsConstantVal();
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
DAG.getTargetConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
@@ -24061,7 +24061,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
if (Cond.getOpcode() == X86ISD::SUB) {
- unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
+ unsigned CondCode = CC->getAsConstantVal();
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
@@ -25359,8 +25359,8 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
Src3.getValueType() != MVT::i8) {
- Src3 = DAG.getTargetConstant(
- cast<ConstantSDNode>(Src3)->getZExtValue() & 0xff, dl, MVT::i8);
+ Src3 =
+ DAG.getTargetConstant(Src3->getAsConstantVal() & 0xff, dl, MVT::i8);
}
// We specify 2 possible opcodes for intrinsics with rounding modes.
@@ -25385,8 +25385,8 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
SDValue Src4 = Op.getOperand(4);
if (Src4.getValueType() != MVT::i8) {
- Src4 = DAG.getTargetConstant(
- cast<ConstantSDNode>(Src4)->getZExtValue() & 0xff, dl, MVT::i8);
+ Src4 =
+ DAG.getTargetConstant(Src4->getAsConstantVal() & 0xff, dl, MVT::i8);
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
@@ -26788,7 +26788,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
{Chain, Op1, Op2, Size}, VT, MMO);
Chain = Res.getValue(1);
Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
- unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned Imm = Op2->getAsConstantVal();
if (Imm)
Res = DAG.getNode(ISD::SHL, DL, VT, Res,
DAG.getShiftAmountConstant(Imm, VT, DL));
@@ -41795,7 +41795,7 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
- unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
+ unsigned ShAmt = Op1->getAsConstantVal();
if (ShAmt >= BitWidth)
break;
@@ -42580,7 +42580,7 @@ static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
APInt Imm(SrcVT.getVectorNumElements(), 0);
for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Op.getOperand(Idx);
- if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
+ if (!In.isUndef() && (In->getAsConstantVal() & 0x1))
Imm.setBit(Idx);
}
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
@@ -53258,7 +53258,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
if (Index.getOpcode() == ISD::ADD &&
Index.getValueType().getVectorElementType() == PtrVT &&
isa<ConstantSDNode>(Scale)) {
- uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
+ uint64_t ScaleAmt = Scale->getAsConstantVal();
if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
BitVector UndefElts;
if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
diff --git a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 05003ec304adc5..cc95c3f5db343c 100644
--- a/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -142,7 +142,7 @@ void XCoreDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
default: break;
case ISD::Constant: {
- uint64_t Val = cast<ConstantSDNode>(N)->getZExtValue();
+ uint64_t Val = N->getAsConstantVal();
if (immMskBitp(N)) {
// Transformation function: get the size of a mask
// Look for the first non-zero bit